Dec 13 06:29:25 crc systemd[1]: Starting Kubernetes Kubelet...
Dec 13 06:29:25 crc restorecon[4703]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 13 06:29:25 crc restorecon[4703]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 13 06:29:25 crc 
restorecon[4703]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 13 06:29:25 crc restorecon[4703]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 13 06:29:25 crc restorecon[4703]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 13 06:29:25 crc 
restorecon[4703]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 13 06:29:25 crc restorecon[4703]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to
system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 
06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 13 06:29:25 crc 
restorecon[4703]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 13 06:29:25 crc restorecon[4703]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Dec 13 06:29:25 crc restorecon[4703]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 13 06:29:25 crc restorecon[4703]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 13 06:29:25 crc restorecon[4703]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 
06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]:
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:25 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 13 06:29:26 crc restorecon[4703]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Dec 13 06:29:26 crc restorecon[4703]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Dec 13 06:29:26 crc restorecon[4703]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Dec 13 06:29:26 crc kubenswrapper[5048]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Dec 13 06:29:26 crc kubenswrapper[5048]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Dec 13 06:29:26 crc kubenswrapper[5048]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Dec 13 06:29:26 crc kubenswrapper[5048]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
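The long restorecon pass above keeps reporting "not reset as customized by admin" for files under /var/lib/kubelet. That message is expected: container_file_t is one of SELinux's customizable types, so restorecon leaves existing labels of that type alone, including the per-pod MCS category pairs (s0:c7,c13, s0:c682,c947, and so on) that the container runtime assigned, unless it is re-run with -F/--force. As a minimal, illustrative sketch only (not part of this log), the label restorecon is comparing can be read from a file's security.selinux extended attribute; the Go program below does exactly that, with golang.org/x/sys/unix as its only dependency and the path taken from the command line:

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/sys/unix"
    )

    func main() {
        if len(os.Args) != 2 {
            fmt.Fprintln(os.Stderr, "usage: sectx <path>")
            os.Exit(2)
        }
        path := os.Args[1]
        // SELinux stores a file's context in the security.selinux xattr;
        // this is the on-disk value restorecon compares to the policy default.
        buf := make([]byte, 256)
        n, err := unix.Getxattr(path, "security.selinux", buf)
        if err != nil {
            fmt.Fprintln(os.Stderr, "getxattr:", err)
            os.Exit(1)
        }
        if n > 0 && buf[n-1] == 0 { // the stored value is NUL-terminated
            n--
        }
        fmt.Printf("%s -> %s\n", path, buf[:n])
    }

Run against one of the paths in the log (e.g. a pod's etc-hosts file) it would print the same system_u:object_r:container_file_t context strings recorded above.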
Dec 13 06:29:26 crc kubenswrapper[5048]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Dec 13 06:29:26 crc kubenswrapper[5048]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.374287 5048 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377579 5048 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377703 5048 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377714 5048 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377719 5048 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377725 5048 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377731 5048 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377735 5048 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377740 5048 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377744 5048 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377749 5048 feature_gate.go:330] unrecognized feature gate: OVNObservability
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377753 5048 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377757 5048 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377761 5048 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377765 5048 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377769 5048 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377774 5048 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377786 5048 feature_gate.go:330] unrecognized feature gate: PinnedImages
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377791 5048 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377795 5048 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377799 5048 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377804 5048 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377808 5048 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377812 5048 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377816 5048 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377821 5048 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377825 5048 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377829 5048 feature_gate.go:330] unrecognized feature gate: NewOLM
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377833 5048 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377836 5048 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377841 5048 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377846 5048 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377852 5048 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377857 5048 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377862 5048 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377866 5048 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377870 5048 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377874 5048 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377878 5048 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377883 5048 feature_gate.go:330] unrecognized feature gate: SignatureStores
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377887 5048 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377893 5048 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377898 5048 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377902 5048 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377906 5048 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377910 5048 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377914 5048 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377918 5048 feature_gate.go:330] unrecognized feature gate: Example
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377922 5048 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377926 5048 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377930 5048 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377934 5048 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377938 5048 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377945 5048 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377949 5048 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377952 5048 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377957 5048 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377961 5048 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377967 5048 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377973 5048 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377979 5048 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377985 5048 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377990 5048 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.377995 5048 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.378000 5048 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.378005 5048 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.378011 5048 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.378018 5048 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.378024 5048 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.378029 5048 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.378034 5048 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.378040 5048 feature_gate.go:330] unrecognized feature gate: PlatformOperators
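The feature_gate.go:330 warnings above are the kubelet's feature-gate parser skipping names it does not recognize: OpenShift hands the kubelet its full cluster-level gate list, but the kubelet only registers upstream Kubernetes gates, so each OpenShift-specific name is logged and ignored rather than treated as a fatal error. A minimal Go sketch of that lookup pattern, assuming a simplified registry (the names and the setGates helper are illustrative, not kubelet's actual code):

    package main

    import "fmt"

    // known maps recognized gate names to their default state; a real
    // component would populate this from its registered feature list.
    var known = map[string]bool{
        "CloudDualStackNodeIPs": true,
        "NodeSwap":              false,
    }

    // setGates applies name=value overrides, warning on unknown names
    // instead of failing hard -- the behavior visible in the log.
    func setGates(overrides map[string]bool) map[string]bool {
        effective := make(map[string]bool, len(known))
        for name, def := range known {
            effective[name] = def
        }
        for name, val := range overrides {
            if _, ok := known[name]; !ok {
                fmt.Printf("unrecognized feature gate: %s\n", name)
                continue
            }
            effective[name] = val
        }
        return effective
    }

    func main() {
        fmt.Println(setGates(map[string]bool{
            "CloudDualStackNodeIPs": true,
            "AutomatedEtcdBackup":   true, // OpenShift-only gate: warned, then skipped
        }))
    }
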
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378357 5048 flags.go:64] FLAG: --address="0.0.0.0"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378389 5048 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378401 5048 flags.go:64] FLAG: --anonymous-auth="true"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378409 5048 flags.go:64] FLAG: --application-metrics-count-limit="100"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378418 5048 flags.go:64] FLAG: --authentication-token-webhook="false"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378424 5048 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378454 5048 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378468 5048 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378474 5048 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378480 5048 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378486 5048 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378492 5048 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378498 5048 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378504 5048 flags.go:64] FLAG: --cgroup-root=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378510 5048 flags.go:64] FLAG: --cgroups-per-qos="true"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378516 5048 flags.go:64] FLAG: --client-ca-file=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378521 5048 flags.go:64] FLAG: --cloud-config=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378529 5048 flags.go:64] FLAG: --cloud-provider=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378534 5048 flags.go:64] FLAG: --cluster-dns="[]"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378543 5048 flags.go:64] FLAG: --cluster-domain=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378549 5048 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378555 5048 flags.go:64] FLAG: --config-dir=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378560 5048 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378567 5048 flags.go:64] FLAG: --container-log-max-files="5"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378576 5048 flags.go:64] FLAG: --container-log-max-size="10Mi"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378581 5048 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378587 5048 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378593 5048 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378599 5048 flags.go:64] FLAG: --contention-profiling="false"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378605 5048 flags.go:64] FLAG: --cpu-cfs-quota="true"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378610 5048 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378616 5048 flags.go:64] FLAG: --cpu-manager-policy="none"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378622 5048 flags.go:64] FLAG: --cpu-manager-policy-options=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378634 5048 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378640 5048 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378648 5048 flags.go:64] FLAG: --enable-debugging-handlers="true"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378653 5048 flags.go:64] FLAG: --enable-load-reader="false"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378659 5048 flags.go:64] FLAG: --enable-server="true"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378665 5048 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378675 5048 flags.go:64] FLAG: --event-burst="100"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378682 5048 flags.go:64] FLAG: --event-qps="50"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378687 5048 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378692 5048 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378698 5048 flags.go:64] FLAG: --eviction-hard=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378706 5048 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378711 5048 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378717 5048 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378723 5048 flags.go:64] FLAG: --eviction-soft=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378728 5048 flags.go:64] FLAG: --eviction-soft-grace-period=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378733 5048 flags.go:64] FLAG: --exit-on-lock-contention="false"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378739 5048 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378744 5048 flags.go:64] FLAG: --experimental-mounter-path=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378749 5048 flags.go:64] FLAG: --fail-cgroupv1="false"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378756 5048 flags.go:64] FLAG: --fail-swap-on="true"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378762 5048 flags.go:64] FLAG: --feature-gates=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378769 5048 flags.go:64] FLAG: --file-check-frequency="20s"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378774 5048 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378780 5048 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378785 5048 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378791 5048 flags.go:64] FLAG: --healthz-port="10248"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378797 5048 flags.go:64] FLAG: --help="false"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378802 5048 flags.go:64] FLAG: --hostname-override=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378807 5048 flags.go:64] FLAG: --housekeeping-interval="10s"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378813 5048 flags.go:64] FLAG: --http-check-frequency="20s"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378818 5048 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378827 5048 flags.go:64] FLAG: --image-credential-provider-config=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378832 5048 flags.go:64] FLAG: --image-gc-high-threshold="85"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378838 5048 flags.go:64] FLAG: --image-gc-low-threshold="80"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378843 5048 flags.go:64] FLAG: --image-service-endpoint=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378849 5048 flags.go:64] FLAG: --kernel-memcg-notification="false"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378854 5048 flags.go:64] FLAG: --kube-api-burst="100"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378859 5048 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378865 5048 flags.go:64] FLAG: --kube-api-qps="50"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378871 5048 flags.go:64] FLAG: --kube-reserved=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378876 5048 flags.go:64] FLAG: --kube-reserved-cgroup=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378881 5048 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378887 5048 flags.go:64] FLAG: --kubelet-cgroups=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378892 5048 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378897 5048 flags.go:64] FLAG: --lock-file=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378902 5048 flags.go:64] FLAG: --log-cadvisor-usage="false"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378908 5048 flags.go:64] FLAG: --log-flush-frequency="5s"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378913 5048 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378922 5048 flags.go:64] FLAG: --log-json-split-stream="false"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378927 5048 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378933 5048 flags.go:64] FLAG: --log-text-split-stream="false"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378938 5048 flags.go:64] FLAG: --logging-format="text"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378944 5048 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378950 5048 flags.go:64] FLAG: --make-iptables-util-chains="true"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378955 5048 flags.go:64] FLAG: --manifest-url=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378962 5048 flags.go:64] FLAG: --manifest-url-header=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378971 5048 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378977 5048 flags.go:64] FLAG: --max-open-files="1000000"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378984 5048 flags.go:64] FLAG: --max-pods="110"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378989 5048 flags.go:64] FLAG: --maximum-dead-containers="-1"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378994 5048 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.378999 5048 flags.go:64] FLAG: --memory-manager-policy="None"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379005 5048 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379010 5048 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379016 5048 flags.go:64] FLAG: --node-ip="192.168.126.11"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379021 5048 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379035 5048 flags.go:64] FLAG: --node-status-max-images="50"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379041 5048 flags.go:64] FLAG: --node-status-update-frequency="10s"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379047 5048 flags.go:64] FLAG: --oom-score-adj="-999"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379052 5048 flags.go:64] FLAG: --pod-cidr=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379058 5048 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379069 5048 flags.go:64] FLAG: --pod-manifest-path=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379074 5048 flags.go:64] FLAG: --pod-max-pids="-1"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379080 5048 flags.go:64] FLAG: --pods-per-core="0"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379086 5048 flags.go:64] FLAG: --port="10250"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379092 5048 flags.go:64] FLAG: --protect-kernel-defaults="false"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379097 5048 flags.go:64] FLAG: --provider-id=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379103 5048 flags.go:64] FLAG: --qos-reserved=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379109 5048 flags.go:64] FLAG: --read-only-port="10255"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379114 5048 flags.go:64] FLAG: --register-node="true"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379120 5048 flags.go:64] FLAG: --register-schedulable="true"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379126 5048 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379137 5048 flags.go:64] FLAG: --registry-burst="10"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379366 5048 flags.go:64] FLAG: --registry-qps="5"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379374 5048 flags.go:64] FLAG: --reserved-cpus=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379380 5048 flags.go:64] FLAG: --reserved-memory=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379387 5048 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379393 5048 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379399 5048 flags.go:64] FLAG: --rotate-certificates="false"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379406 5048 flags.go:64] FLAG: --rotate-server-certificates="false"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379411 5048 flags.go:64] FLAG: --runonce="false"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379418 5048 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379424 5048 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379430 5048 flags.go:64] FLAG: --seccomp-default="false"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379436 5048 flags.go:64] FLAG: --serialize-image-pulls="true"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379460 5048 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379468 5048 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379475 5048 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379481 5048 flags.go:64] FLAG: --storage-driver-password="root"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379486 5048 flags.go:64] FLAG: --storage-driver-secure="false"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379492 5048 flags.go:64] FLAG: --storage-driver-table="stats"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379497 5048 flags.go:64] FLAG: --storage-driver-user="root"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379503 5048 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379509 5048 flags.go:64] FLAG: --sync-frequency="1m0s"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379515 5048 flags.go:64] FLAG: --system-cgroups=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379520 5048 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379530 5048 flags.go:64] FLAG: --system-reserved-cgroup=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379536 5048 flags.go:64] FLAG: --tls-cert-file=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379542 5048 flags.go:64] FLAG: --tls-cipher-suites="[]"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379550 5048 flags.go:64] FLAG: --tls-min-version=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379556 5048 flags.go:64] FLAG: --tls-private-key-file=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379561 5048 flags.go:64] FLAG: --topology-manager-policy="none"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379567 5048 flags.go:64] FLAG: --topology-manager-policy-options=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379572 5048 flags.go:64] FLAG: --topology-manager-scope="container"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379579 5048 flags.go:64] FLAG: --v="2"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379587 5048 flags.go:64] FLAG: --version="false"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379595 5048 flags.go:64] FLAG: --vmodule=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379603 5048 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.379609 5048 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
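The flags.go:64 entries above are the kubelet dumping every registered command-line flag with its effective value at verbosity 2, in alphabetical order. The same kind of dump can be reproduced with the Go standard library's flag.VisitAll, which walks all defined flags in lexical order; a small sketch (the two flags are stand-ins for the kubelet's much larger set):

    package main

    import (
        "flag"
        "fmt"
    )

    func main() {
        // Stand-in flags; the kubelet defines far more than these two.
        flag.String("node-ip", "", "IP address of the node")
        flag.Int("max-pods", 110, "maximum number of pods per node")
        flag.Parse()

        // VisitAll visits every defined flag, set or not, sorted by
        // name -- matching the alphabetical ordering in the log.
        flag.VisitAll(func(f *flag.Flag) {
            fmt.Printf("FLAG: --%s=%q\n", f.Name, f.Value.String())
        })
    }
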
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.380419 5048 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.388604 5048 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.388674 5048 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
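The feature_gate.go:386 summary line prints the resolved gate map once per parse. The alphabetical key order is not an accident: since Go 1.12, the fmt package prints map keys in sorted order, so this summary is deterministic across restarts. A trivial demonstration (gate values copied from the log; the exact set is abbreviated):

    package main

    import "fmt"

    func main() {
        gates := map[string]bool{
            "ValidatingAdmissionPolicy":              true,
            "KMSv1":                                  true,
            "CloudDualStackNodeIPs":                  true,
            "NodeSwap":                               false,
            "DisableKubeletCloudCredentialProviders": true,
        }
        // fmt sorts map keys on output, which is why the log summary
        // always lists gates alphabetically regardless of insertion order.
        fmt.Printf("feature gates: %v\n", gates)
    }
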
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.390088 5048 server.go:940] "Client rotation is on, will bootstrap in background"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.392834 5048 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.392938 5048 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.393387 5048 server.go:997] "Starting client certificate rotation"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.393416 5048 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.394891 5048 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-14 16:59:27.87894274 +0000 UTC
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.404117 5048 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 34h30m1.474843527s for next certificate rotation
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.415512 5048 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.417248 5048 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.424809 5048 log.go:25] "Validated CRI v1 runtime API"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.449880 5048 log.go:25] "Validated CRI v1 image API"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.451202 5048 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.455005 5048 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-12-13-06-15-42-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.455077 5048 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:41 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:42 fsType:tmpfs blockSize:0}]
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.471664 5048 manager.go:217] Machine: {Timestamp:2025-12-13 06:29:26.47041746 +0000 UTC m=+0.337012061 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2799998 MemoryCapacity:33654128640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.471664 5048 manager.go:217] Machine: {Timestamp:2025-12-13 06:29:26.47041746 +0000 UTC m=+0.337012061 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2799998 MemoryCapacity:33654128640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:c40dcd55-9053-46a7-9f70-890f2d5d7520 BootID:6aaa1fdd-aec3-41cf-bed2-ae7f1a625255 Filesystems:[{Device:/run/user/1000 DeviceMajor:0 DeviceMinor:41 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:42 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:89:78:28 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:89:78:28 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:d8:cc:4f Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:b5:b6:b9 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:8c:9b:df Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:28:bc:34 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:4e:18:0f:59:25:2e Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:4a:8c:8d:75:30:83 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654128640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.471917 5048 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.472139 5048 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.472811 5048 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.473086 5048 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
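The swap record above quotes a /proc/swaps dump that contains only the header row, consistent with SwapCapacity:0 in the machine inventory; whether any swap device is active amounts to checking for rows under that header. A minimal sketch of such a check (swapActive is illustrative, not the kubelet's swap_util code):

    package main

    import (
        "fmt"
        "strings"
    )

    // swapActive reports whether a /proc/swaps dump lists any device
    // beyond the "Filename Type Size Used Priority" header line.
    func swapActive(procSwaps string) bool {
        lines := strings.Split(strings.TrimSpace(procSwaps), "\n")
        return len(lines) > 1
    }

    func main() {
        contents := "Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
        fmt.Println("swap active:", swapActive(contents)) // false: header only
    }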
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.473135 5048 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.473412 5048 topology_manager.go:138] "Creating topology manager with none policy"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.473425 5048 container_manager_linux.go:303] "Creating device plugin manager"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.473745 5048 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.473787 5048 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.473998 5048 state_mem.go:36] "Initialized new in-memory state store"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.474108 5048 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.479727 5048 kubelet.go:418] "Attempting to sync node with API server"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.479761 5048 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.479789 5048 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.479804 5048 kubelet.go:324] "Adding apiserver pod source"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.479818 5048 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.486908 5048 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.487517 5048 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
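The HardEvictionThresholds in the nodeConfig above mix absolute quantities (memory.available below 100Mi) with percentages (nodefs.available below 10%), each resolved against the relevant capacity before comparison with the live signal. A minimal sketch of that resolution, reusing capacities from the machine inventory (the threshold type here is illustrative, not the kubelet's):

    package main

    import "fmt"

    // threshold resolves a hard-eviction signal to bytes: either an
    // absolute quantity or a percentage of the relevant capacity.
    type threshold struct {
        signal     string
        quantity   int64   // bytes; 0 if percentage-based
        percentage float64 // fraction of capacity; 0 if quantity-based
    }

    func (t threshold) bytes(capacity int64) int64 {
        if t.quantity > 0 {
            return t.quantity
        }
        return int64(t.percentage * float64(capacity))
    }

    func main() {
        memCapacity := int64(33654128640) // MemoryCapacity from the Machine record
        varCapacity := int64(85292941312) // /dev/vda4 (/var) capacity, i.e. nodefs
        mem := threshold{signal: "memory.available", quantity: 100 * 1024 * 1024}
        nodefs := threshold{signal: "nodefs.available", percentage: 0.1}
        fmt.Println(mem.signal, "evicts below", mem.bytes(memCapacity), "bytes")
        fmt.Println(nodefs.signal, "evicts below", nodefs.bytes(varCapacity), "bytes")
    }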
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.496682 5048 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.498368 5048 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.251:6443: connect: connection refused
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.498394 5048 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.251:6443: connect: connection refused
Dec 13 06:29:26 crc kubenswrapper[5048]: E1213 06:29:26.498515 5048 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.251:6443: connect: connection refused" logger="UnhandledError"
Dec 13 06:29:26 crc kubenswrapper[5048]: E1213 06:29:26.498617 5048 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.251:6443: connect: connection refused" logger="UnhandledError"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.498395 5048 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.499646 5048 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.499687 5048 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.499700 5048 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.499725 5048 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.499740 5048 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.499784 5048 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.499803 5048 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.499819 5048 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.499831 5048 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.499847 5048 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.499857 5048 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.500144 5048 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
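Each failed list above is retried by the informer machinery with increasing backoff until the apiserver behind api-int.crc.testing:6443 starts accepting connections, which is why the same "connection refused" recurs throughout startup. A rough sketch of that retry shape (relist and the hard-coded backoff values are illustrative, not client-go internals):

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // relist retries a list call with capped exponential backoff, the
    // general shape of what a reflector does after "connection refused".
    func relist(list func() error, maxBackoff time.Duration) {
        backoff := 100 * time.Millisecond
        for {
            err := list()
            if err == nil {
                return
            }
            fmt.Println("list failed, retrying in", backoff, "-", err)
            time.Sleep(backoff)
            backoff *= 2
            if backoff > maxBackoff {
                backoff = maxBackoff
            }
        }
    }

    func main() {
        attempts := 0
        relist(func() error {
            attempts++
            if attempts < 3 { // succeed once the apiserver comes up
                return errors.New("dial tcp 38.102.83.251:6443: connect: connection refused")
            }
            return nil
        }, 5*time.Second)
    }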
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.501031 5048 server.go:1280] "Started kubelet"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.501297 5048 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.501569 5048 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.251:6443: connect: connection refused
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.501687 5048 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.502014 5048 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Dec 13 06:29:26 crc systemd[1]: Started Kubernetes Kubelet.
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.503592 5048 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.503632 5048 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.504001 5048 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-19 19:32:21.78812894 +0000 UTC
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.504045 5048 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 157h2m55.28408663s for next certificate rotation
Dec 13 06:29:26 crc kubenswrapper[5048]: E1213 06:29:26.503876 5048 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.251:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.1880b28b951a8190 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-13 06:29:26.500974992 +0000 UTC m=+0.367569583,LastTimestamp:2025-12-13 06:29:26.500974992 +0000 UTC m=+0.367569583,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Dec 13 06:29:26 crc kubenswrapper[5048]: E1213 06:29:26.506547 5048 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.506582 5048 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.506568 5048 volume_manager.go:287] "The desired_state_of_world populator starts"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.506606 5048 volume_manager.go:289] "Starting Kubelet Volume Manager"
Dec 13 06:29:26 crc kubenswrapper[5048]: E1213 06:29:26.507161 5048 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.251:6443: connect: connection refused" interval="200ms"
Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.507386 5048 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.251:6443: connect: connection refused
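The podresources endpoint above is guarded by a token bucket with qps=100 and burstTokens=10, i.e. a bucket of depth 10 refilled at 100 tokens per second. A minimal sketch of the same shape using golang.org/x/time/rate (the request loop is illustrative):

    package main

    import (
        "fmt"

        "golang.org/x/time/rate"
    )

    func main() {
        // 100 tokens/second refill, bucket depth 10, as in the log line.
        limiter := rate.NewLimiter(rate.Limit(100), 10)
        served, throttled := 0, 0
        for i := 0; i < 50; i++ { // a burst of 50 back-to-back requests
            if limiter.Allow() {
                served++
            } else {
                throttled++
            }
        }
        fmt.Printf("served %d, throttled %d\n", served, throttled)
    }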
Dec 13 06:29:26 crc kubenswrapper[5048]: E1213 06:29:26.507488 5048 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.251:6443: connect: connection refused" logger="UnhandledError"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.508606 5048 factory.go:55] Registering systemd factory
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.508635 5048 factory.go:221] Registration of the systemd container factory successfully
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.509611 5048 factory.go:153] Registering CRI-O factory
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.509771 5048 factory.go:221] Registration of the crio container factory successfully
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.509989 5048 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.510149 5048 factory.go:103] Registering Raw factory
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.510259 5048 manager.go:1196] Started watching for new ooms in manager
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.510487 5048 server.go:460] "Adding debug handlers to kubelet server"
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.511821 5048 manager.go:319] Starting recovery of all containers
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.525000 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.525094 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.525112 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.525127 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.525143 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext=""
Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.525160 5048 reconstruct.go:130] "Volume is
marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.525173 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.525188 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.525204 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.525217 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.525233 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.525247 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.525261 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.525278 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.525293 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.525309 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.525325 5048 reconstruct.go:130] "Volume is marked as 
uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.525339 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.525352 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.525364 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.525375 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.525392 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.525408 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.525423 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.525473 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.527812 5048 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.527852 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Dec 13 06:29:26 crc 
kubenswrapper[5048]: I1213 06:29:26.527892 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.527911 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.527955 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.527971 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.527986 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528000 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528017 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528037 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528051 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528064 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528076 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528110 5048 
reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528123 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528137 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528150 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528165 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528178 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528193 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528205 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528234 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528246 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528261 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528275 5048 
reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528289 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528302 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528316 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528351 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528379 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528391 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528404 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528419 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528431 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528480 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528493 5048 reconstruct.go:130] "Volume is 
marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528507 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528653 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528671 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528689 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528701 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528716 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528779 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528798 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528816 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528855 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528867 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" 
pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528908 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528921 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.528935 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529009 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529031 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529054 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529091 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529105 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529119 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529133 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529148 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" 
pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529161 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529174 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529187 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529222 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529236 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529248 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529263 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529276 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529289 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529303 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529317 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" 
volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529344 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529356 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529369 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529383 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529395 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529418 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529431 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529462 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529492 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529505 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529520 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" 
volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529594 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529608 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529627 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529639 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529652 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529682 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529696 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529706 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529720 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529732 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529744 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" 
volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529757 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529769 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529797 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529809 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529821 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529869 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529882 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529894 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529904 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529914 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529940 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" 
volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529950 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529963 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529974 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529987 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.529999 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530011 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530023 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530052 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530068 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530099 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530120 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" 
volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530136 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530150 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530165 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530180 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530214 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530227 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530239 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530250 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530262 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530274 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530317 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" 
volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530328 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530368 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530395 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530407 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530438 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530467 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530480 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530500 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530520 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530556 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530625 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" 
volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530649 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530661 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530672 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530684 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530696 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530706 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530724 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530738 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530751 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530764 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530777 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" 
volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530790 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530809 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530827 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530845 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530857 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530876 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530889 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530901 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530915 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530929 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530940 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" 
volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530953 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530966 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530978 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.530999 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.531011 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.531024 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.531035 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.531048 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.531060 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.531072 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.531083 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.531095 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.531108 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.531119 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.531131 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.531145 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.531159 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.531173 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.531192 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.531206 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.531224 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.531236 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.531247 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.531260 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.531272 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.531283 5048 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.531293 5048 reconstruct.go:97] "Volume reconstruction finished" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.531301 5048 reconciler.go:26] "Reconciler: start to sync state" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.543736 5048 manager.go:324] Recovery completed Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.556872 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.559576 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.559944 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.560070 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.561240 5048 cpu_manager.go:225] "Starting CPU manager" policy="none" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.561286 5048 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.561342 5048 state_mem.go:36] "Initialized new in-memory state store" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.562113 5048 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.565134 5048 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.565218 5048 status_manager.go:217] "Starting to sync pod status with apiserver" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.565564 5048 kubelet.go:2335] "Starting kubelet main sync loop" Dec 13 06:29:26 crc kubenswrapper[5048]: E1213 06:29:26.565631 5048 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Dec 13 06:29:26 crc kubenswrapper[5048]: W1213 06:29:26.566959 5048 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.251:6443: connect: connection refused Dec 13 06:29:26 crc kubenswrapper[5048]: E1213 06:29:26.567091 5048 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.251:6443: connect: connection refused" logger="UnhandledError" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.579870 5048 policy_none.go:49] "None policy: Start" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.591120 5048 memory_manager.go:170] "Starting memorymanager" policy="None" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.591198 5048 state_mem.go:35] "Initializing new in-memory state store" Dec 13 06:29:26 crc kubenswrapper[5048]: E1213 06:29:26.606709 5048 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 13 06:29:26 crc kubenswrapper[5048]: E1213 06:29:26.666249 5048 kubelet.go:2359] "Skipping pod synchronization" err="container runtime status check may not have completed yet" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.673321 5048 manager.go:334] "Starting Device Plugin manager" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.673462 5048 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.673491 5048 server.go:79] "Starting device plugin registration server" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.674146 5048 eviction_manager.go:189] "Eviction manager: starting control loop" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.674178 5048 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.674828 5048 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.675036 5048 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.675051 5048 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Dec 13 06:29:26 crc kubenswrapper[5048]: E1213 06:29:26.681280 5048 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Dec 13 06:29:26 crc kubenswrapper[5048]: E1213 06:29:26.708938 5048 controller.go:145] "Failed to ensure lease exists, will retry" err="Get 
\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.251:6443: connect: connection refused" interval="400ms" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.775305 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.776893 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.776929 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.776938 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.776966 5048 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 13 06:29:26 crc kubenswrapper[5048]: E1213 06:29:26.777702 5048 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.251:6443: connect: connection refused" node="crc" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.867016 5048 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.867200 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.872585 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.872645 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.872656 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.872913 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.873256 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.873368 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.874299 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.874332 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.874341 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.874445 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.874669 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.874711 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.874842 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.874879 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.874892 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.875201 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.875229 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.875241 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.875340 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.875501 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.875561 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.875893 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.875931 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.875944 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.876005 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.876027 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.876039 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.876362 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.876380 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.876403 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.876414 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.876368 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.876488 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.877579 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.877604 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.877615 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.877619 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.877647 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.877659 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.877794 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.877819 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.879102 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.879123 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.879135 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.936979 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.937122 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.937169 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.937207 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.937260 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.937296 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.937322 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.937355 5048 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.937381 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.937415 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.937485 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.937513 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.937543 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.937564 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.937584 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.978711 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.980823 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.981091 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:26 crc kubenswrapper[5048]: 
I1213 06:29:26.981179 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:26 crc kubenswrapper[5048]: I1213 06:29:26.981348 5048 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 13 06:29:26 crc kubenswrapper[5048]: E1213 06:29:26.982144 5048 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.251:6443: connect: connection refused" node="crc" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.038918 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.039253 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.039373 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.039489 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.039575 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.039261 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.039528 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.039309 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.039818 5048 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.039927 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.040022 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.040059 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.040147 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.040234 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.040355 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.040469 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.040650 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.040704 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 13 06:29:27 crc 
kubenswrapper[5048]: I1213 06:29:27.040739 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.040774 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.040790 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.040807 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.040823 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.040844 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.040928 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.040950 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.040982 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.041004 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 13 
06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.041033 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.041309 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 13 06:29:27 crc kubenswrapper[5048]: E1213 06:29:27.110909 5048 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.251:6443: connect: connection refused" interval="800ms" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.205428 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.214958 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.229608 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.235133 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 13 06:29:27 crc kubenswrapper[5048]: W1213 06:29:27.241579 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-c6ae98706b61fa2e2fb76514aeaa6a796ae92efb7fcba28d1adf8ce6253cfb6a WatchSource:0}: Error finding container c6ae98706b61fa2e2fb76514aeaa6a796ae92efb7fcba28d1adf8ce6253cfb6a: Status 404 returned error can't find the container with id c6ae98706b61fa2e2fb76514aeaa6a796ae92efb7fcba28d1adf8ce6253cfb6a Dec 13 06:29:27 crc kubenswrapper[5048]: W1213 06:29:27.245253 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-83eb7c14e69501985bb5d3091e3a7b89c12fa6309e219b2b87ef6e154b935682 WatchSource:0}: Error finding container 83eb7c14e69501985bb5d3091e3a7b89c12fa6309e219b2b87ef6e154b935682: Status 404 returned error can't find the container with id 83eb7c14e69501985bb5d3091e3a7b89c12fa6309e219b2b87ef6e154b935682 Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.251982 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.382979 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.384923 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.384974 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.385013 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.385122 5048 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 13 06:29:27 crc kubenswrapper[5048]: E1213 06:29:27.386048 5048 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.251:6443: connect: connection refused" node="crc" Dec 13 06:29:27 crc kubenswrapper[5048]: W1213 06:29:27.401081 5048 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.251:6443: connect: connection refused Dec 13 06:29:27 crc kubenswrapper[5048]: E1213 06:29:27.401205 5048 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.251:6443: connect: connection refused" logger="UnhandledError" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.503019 5048 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.251:6443: connect: connection refused Dec 13 06:29:27 crc kubenswrapper[5048]: E1213 06:29:27.912652 5048 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.251:6443: connect: connection refused" interval="1.6s" Dec 13 06:29:27 crc kubenswrapper[5048]: W1213 06:29:27.912630 5048 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.251:6443: connect: connection refused Dec 13 06:29:27 crc kubenswrapper[5048]: E1213 06:29:27.912861 5048 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.251:6443: connect: connection refused" logger="UnhandledError" Dec 13 06:29:27 crc kubenswrapper[5048]: W1213 06:29:27.962718 5048 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.251:6443: 
connect: connection refused Dec 13 06:29:27 crc kubenswrapper[5048]: E1213 06:29:27.962868 5048 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.251:6443: connect: connection refused" logger="UnhandledError" Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.967370 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"83eb7c14e69501985bb5d3091e3a7b89c12fa6309e219b2b87ef6e154b935682"} Dec 13 06:29:27 crc kubenswrapper[5048]: I1213 06:29:27.968601 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"c6ae98706b61fa2e2fb76514aeaa6a796ae92efb7fcba28d1adf8ce6253cfb6a"} Dec 13 06:29:27 crc kubenswrapper[5048]: W1213 06:29:27.986578 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-7f9ecdece888f7ba95cbb34cec6377b5231339b83ef8f0bf1853694c57bbddae WatchSource:0}: Error finding container 7f9ecdece888f7ba95cbb34cec6377b5231339b83ef8f0bf1853694c57bbddae: Status 404 returned error can't find the container with id 7f9ecdece888f7ba95cbb34cec6377b5231339b83ef8f0bf1853694c57bbddae Dec 13 06:29:27 crc kubenswrapper[5048]: W1213 06:29:27.987884 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-e927e7b4096dc76fcb11ca8ec9a96bd000a1e0e3d6a41fb5af0baee24f163309 WatchSource:0}: Error finding container e927e7b4096dc76fcb11ca8ec9a96bd000a1e0e3d6a41fb5af0baee24f163309: Status 404 returned error can't find the container with id e927e7b4096dc76fcb11ca8ec9a96bd000a1e0e3d6a41fb5af0baee24f163309 Dec 13 06:29:28 crc kubenswrapper[5048]: W1213 06:29:28.006068 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-9b81b72912973ce909d82f98334afd6f39e46fecb76a497567cc544031b37785 WatchSource:0}: Error finding container 9b81b72912973ce909d82f98334afd6f39e46fecb76a497567cc544031b37785: Status 404 returned error can't find the container with id 9b81b72912973ce909d82f98334afd6f39e46fecb76a497567cc544031b37785 Dec 13 06:29:28 crc kubenswrapper[5048]: W1213 06:29:28.035350 5048 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.251:6443: connect: connection refused Dec 13 06:29:28 crc kubenswrapper[5048]: E1213 06:29:28.035539 5048 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.251:6443: connect: connection refused" logger="UnhandledError" Dec 13 06:29:28 crc kubenswrapper[5048]: I1213 06:29:28.186368 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller 
attach/detach" Dec 13 06:29:28 crc kubenswrapper[5048]: I1213 06:29:28.188253 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:28 crc kubenswrapper[5048]: I1213 06:29:28.188301 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:28 crc kubenswrapper[5048]: I1213 06:29:28.188313 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:28 crc kubenswrapper[5048]: I1213 06:29:28.188347 5048 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 13 06:29:28 crc kubenswrapper[5048]: E1213 06:29:28.189041 5048 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.251:6443: connect: connection refused" node="crc" Dec 13 06:29:28 crc kubenswrapper[5048]: I1213 06:29:28.502574 5048 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.251:6443: connect: connection refused Dec 13 06:29:28 crc kubenswrapper[5048]: E1213 06:29:28.718391 5048 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.251:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.1880b28b951a8190 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-13 06:29:26.500974992 +0000 UTC m=+0.367569583,LastTimestamp:2025-12-13 06:29:26.500974992 +0000 UTC m=+0.367569583,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Dec 13 06:29:28 crc kubenswrapper[5048]: I1213 06:29:28.972378 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa"} Dec 13 06:29:28 crc kubenswrapper[5048]: I1213 06:29:28.972477 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"7f9ecdece888f7ba95cbb34cec6377b5231339b83ef8f0bf1853694c57bbddae"} Dec 13 06:29:28 crc kubenswrapper[5048]: I1213 06:29:28.974479 5048 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72" exitCode=0 Dec 13 06:29:28 crc kubenswrapper[5048]: I1213 06:29:28.974569 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 13 06:29:28 crc kubenswrapper[5048]: I1213 06:29:28.974658 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72"} Dec 13 06:29:28 crc kubenswrapper[5048]: I1213 
06:29:28.976203 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:28 crc kubenswrapper[5048]: I1213 06:29:28.976289 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:28 crc kubenswrapper[5048]: I1213 06:29:28.976301 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:28 crc kubenswrapper[5048]: I1213 06:29:28.976757 5048 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5" exitCode=0 Dec 13 06:29:28 crc kubenswrapper[5048]: I1213 06:29:28.977019 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 13 06:29:28 crc kubenswrapper[5048]: I1213 06:29:28.977244 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5"} Dec 13 06:29:28 crc kubenswrapper[5048]: I1213 06:29:28.978179 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:28 crc kubenswrapper[5048]: I1213 06:29:28.978223 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:28 crc kubenswrapper[5048]: I1213 06:29:28.978242 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:28 crc kubenswrapper[5048]: I1213 06:29:28.978705 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 13 06:29:28 crc kubenswrapper[5048]: I1213 06:29:28.979184 5048 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="2144a7f51ff3f41cfe760f2939d1b2d31ef9c9e4863fe8b26eaeda22a2f5ee23" exitCode=0 Dec 13 06:29:28 crc kubenswrapper[5048]: I1213 06:29:28.979232 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"2144a7f51ff3f41cfe760f2939d1b2d31ef9c9e4863fe8b26eaeda22a2f5ee23"} Dec 13 06:29:28 crc kubenswrapper[5048]: I1213 06:29:28.979251 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"9b81b72912973ce909d82f98334afd6f39e46fecb76a497567cc544031b37785"} Dec 13 06:29:28 crc kubenswrapper[5048]: I1213 06:29:28.979325 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:28 crc kubenswrapper[5048]: I1213 06:29:28.979357 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:28 crc kubenswrapper[5048]: I1213 06:29:28.979368 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:28 crc kubenswrapper[5048]: I1213 06:29:28.981485 5048 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="719efa03a574deadefcdc3926b9aa9bf0d6f0f2ec72357b3cde3d17bf94eacd0" exitCode=0 Dec 13 
06:29:28 crc kubenswrapper[5048]: I1213 06:29:28.981518 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"719efa03a574deadefcdc3926b9aa9bf0d6f0f2ec72357b3cde3d17bf94eacd0"} Dec 13 06:29:28 crc kubenswrapper[5048]: I1213 06:29:28.981536 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"e927e7b4096dc76fcb11ca8ec9a96bd000a1e0e3d6a41fb5af0baee24f163309"} Dec 13 06:29:28 crc kubenswrapper[5048]: I1213 06:29:28.981608 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 13 06:29:28 crc kubenswrapper[5048]: I1213 06:29:28.982227 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:28 crc kubenswrapper[5048]: I1213 06:29:28.982255 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:28 crc kubenswrapper[5048]: I1213 06:29:28.982266 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:29 crc kubenswrapper[5048]: I1213 06:29:29.505831 5048 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.251:6443: connect: connection refused Dec 13 06:29:29 crc kubenswrapper[5048]: E1213 06:29:29.513566 5048 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.251:6443: connect: connection refused" interval="3.2s" Dec 13 06:29:29 crc kubenswrapper[5048]: W1213 06:29:29.675917 5048 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.251:6443: connect: connection refused Dec 13 06:29:29 crc kubenswrapper[5048]: E1213 06:29:29.676044 5048 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.251:6443: connect: connection refused" logger="UnhandledError" Dec 13 06:29:29 crc kubenswrapper[5048]: I1213 06:29:29.789855 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 13 06:29:29 crc kubenswrapper[5048]: I1213 06:29:29.794278 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:29 crc kubenswrapper[5048]: I1213 06:29:29.794348 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:29 crc kubenswrapper[5048]: I1213 06:29:29.794360 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:29 crc kubenswrapper[5048]: I1213 06:29:29.794400 5048 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 13 06:29:29 crc kubenswrapper[5048]: E1213 06:29:29.795865 
5048 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.251:6443: connect: connection refused" node="crc" Dec 13 06:29:29 crc kubenswrapper[5048]: I1213 06:29:29.987590 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551"} Dec 13 06:29:29 crc kubenswrapper[5048]: I1213 06:29:29.988017 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8"} Dec 13 06:29:29 crc kubenswrapper[5048]: I1213 06:29:29.990001 5048 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a" exitCode=0 Dec 13 06:29:29 crc kubenswrapper[5048]: I1213 06:29:29.990162 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a"} Dec 13 06:29:29 crc kubenswrapper[5048]: I1213 06:29:29.990280 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 13 06:29:29 crc kubenswrapper[5048]: I1213 06:29:29.991811 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:29 crc kubenswrapper[5048]: I1213 06:29:29.991852 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:29 crc kubenswrapper[5048]: I1213 06:29:29.991868 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:29 crc kubenswrapper[5048]: I1213 06:29:29.992547 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"e6f921bd85ec5e159d59b8ee9e98026784f7ed189beea02e9f76fced30361160"} Dec 13 06:29:29 crc kubenswrapper[5048]: I1213 06:29:29.992641 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"6bdca655a7c7ffd64ba59fb2ba574a7fef8ff566bcf1c75776b7cd93d734b694"} Dec 13 06:29:29 crc kubenswrapper[5048]: I1213 06:29:29.995325 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 13 06:29:29 crc kubenswrapper[5048]: I1213 06:29:29.995409 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698"} Dec 13 06:29:29 crc kubenswrapper[5048]: I1213 06:29:29.995520 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d"} Dec 13 
06:29:29 crc kubenswrapper[5048]: I1213 06:29:29.996149 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:29 crc kubenswrapper[5048]: I1213 06:29:29.996182 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:29 crc kubenswrapper[5048]: I1213 06:29:29.996194 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:30 crc kubenswrapper[5048]: W1213 06:29:30.191887 5048 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.251:6443: connect: connection refused Dec 13 06:29:30 crc kubenswrapper[5048]: E1213 06:29:30.192129 5048 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.251:6443: connect: connection refused" logger="UnhandledError" Dec 13 06:29:30 crc kubenswrapper[5048]: I1213 06:29:30.503119 5048 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.251:6443: connect: connection refused Dec 13 06:29:31 crc kubenswrapper[5048]: I1213 06:29:31.000964 5048 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf" exitCode=0 Dec 13 06:29:31 crc kubenswrapper[5048]: I1213 06:29:31.001049 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf"} Dec 13 06:29:31 crc kubenswrapper[5048]: I1213 06:29:31.001260 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 13 06:29:31 crc kubenswrapper[5048]: I1213 06:29:31.002457 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:31 crc kubenswrapper[5048]: I1213 06:29:31.002487 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:31 crc kubenswrapper[5048]: I1213 06:29:31.002499 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:31 crc kubenswrapper[5048]: I1213 06:29:31.013997 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"3f8010716141c431c87894a8a22182ae06fd0671ad4bd6988888756f027a240d"} Dec 13 06:29:31 crc kubenswrapper[5048]: I1213 06:29:31.014233 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 13 06:29:31 crc kubenswrapper[5048]: I1213 06:29:31.015645 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:31 crc kubenswrapper[5048]: I1213 06:29:31.015701 5048 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:31 crc kubenswrapper[5048]: I1213 06:29:31.015715 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:31 crc kubenswrapper[5048]: I1213 06:29:31.017048 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"0db465510d0e1d5bf6f917f72d4b32608ab044e44bcf883bcfd9634d2d18ae4d"} Dec 13 06:29:31 crc kubenswrapper[5048]: I1213 06:29:31.017159 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 13 06:29:31 crc kubenswrapper[5048]: I1213 06:29:31.018526 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:31 crc kubenswrapper[5048]: I1213 06:29:31.018578 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:31 crc kubenswrapper[5048]: I1213 06:29:31.018595 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:31 crc kubenswrapper[5048]: I1213 06:29:31.020750 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 13 06:29:31 crc kubenswrapper[5048]: I1213 06:29:31.020700 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28"} Dec 13 06:29:31 crc kubenswrapper[5048]: I1213 06:29:31.022970 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:31 crc kubenswrapper[5048]: I1213 06:29:31.023021 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:31 crc kubenswrapper[5048]: I1213 06:29:31.023036 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:31 crc kubenswrapper[5048]: I1213 06:29:31.025285 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837"} Dec 13 06:29:31 crc kubenswrapper[5048]: I1213 06:29:31.025335 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea"} Dec 13 06:29:31 crc kubenswrapper[5048]: I1213 06:29:31.264838 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 13 06:29:32 crc kubenswrapper[5048]: I1213 06:29:32.029054 5048 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 13 06:29:32 crc kubenswrapper[5048]: I1213 06:29:32.029112 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 13 06:29:32 crc kubenswrapper[5048]: I1213 06:29:32.029084 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" 
Dec 13 06:29:32 crc kubenswrapper[5048]: I1213 06:29:32.030544 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:29:32 crc kubenswrapper[5048]: I1213 06:29:32.030596 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:29:32 crc kubenswrapper[5048]: I1213 06:29:32.030610 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:29:32 crc kubenswrapper[5048]: I1213 06:29:32.031399 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:29:32 crc kubenswrapper[5048]: I1213 06:29:32.031429 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:29:32 crc kubenswrapper[5048]: I1213 06:29:32.031453 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:29:32 crc kubenswrapper[5048]: I1213 06:29:32.438784 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 13 06:29:32 crc kubenswrapper[5048]: I1213 06:29:32.996516 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 13 06:29:32 crc kubenswrapper[5048]: I1213 06:29:32.998346 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:29:32 crc kubenswrapper[5048]: I1213 06:29:32.998420 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:29:32 crc kubenswrapper[5048]: I1213 06:29:32.998453 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:29:32 crc kubenswrapper[5048]: I1213 06:29:32.998490 5048 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Dec 13 06:29:33 crc kubenswrapper[5048]: I1213 06:29:33.035569 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f"}
Dec 13 06:29:33 crc kubenswrapper[5048]: I1213 06:29:33.035671 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 13 06:29:33 crc kubenswrapper[5048]: I1213 06:29:33.036888 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:29:33 crc kubenswrapper[5048]: I1213 06:29:33.036939 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:29:33 crc kubenswrapper[5048]: I1213 06:29:33.036953 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:29:33 crc kubenswrapper[5048]: I1213 06:29:33.038835 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce"}
Dec 13 06:29:33 crc kubenswrapper[5048]: I1213 06:29:33.038882 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 13 06:29:33 crc kubenswrapper[5048]: I1213 06:29:33.039709 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:29:33 crc kubenswrapper[5048]: I1213 06:29:33.039743 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:29:33 crc kubenswrapper[5048]: I1213 06:29:33.039764 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:29:33 crc kubenswrapper[5048]: I1213 06:29:33.041176 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 13 06:29:33 crc kubenswrapper[5048]: I1213 06:29:33.775571 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Dec 13 06:29:33 crc kubenswrapper[5048]: I1213 06:29:33.775807 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 13 06:29:33 crc kubenswrapper[5048]: I1213 06:29:33.777179 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:29:33 crc kubenswrapper[5048]: I1213 06:29:33.777222 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:29:33 crc kubenswrapper[5048]: I1213 06:29:33.777242 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:29:34 crc kubenswrapper[5048]: I1213 06:29:34.047336 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9"}
Dec 13 06:29:34 crc kubenswrapper[5048]: I1213 06:29:34.047417 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8"}
Dec 13 06:29:34 crc kubenswrapper[5048]: I1213 06:29:34.047445 5048 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Dec 13 06:29:34 crc kubenswrapper[5048]: I1213 06:29:34.047516 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 13 06:29:34 crc kubenswrapper[5048]: I1213 06:29:34.047452 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649"}
Dec 13 06:29:34 crc kubenswrapper[5048]: I1213 06:29:34.047535 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 13 06:29:34 crc kubenswrapper[5048]: I1213 06:29:34.048868 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:29:34 crc kubenswrapper[5048]: I1213 06:29:34.048894 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:29:34 crc kubenswrapper[5048]: I1213 06:29:34.048916 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:29:34 crc kubenswrapper[5048]: I1213 06:29:34.048917 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:29:34 crc kubenswrapper[5048]: I1213 06:29:34.048935 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:29:34 crc kubenswrapper[5048]: I1213 06:29:34.048941 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:29:34 crc kubenswrapper[5048]: I1213 06:29:34.630263 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 13 06:29:34 crc kubenswrapper[5048]: I1213 06:29:34.635618 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 13 06:29:35 crc kubenswrapper[5048]: I1213 06:29:35.054669 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"fdb85662c8bc6d5496f580bfeee2c02c425f22a85175c6d4c046e95c0b6c371d"}
Dec 13 06:29:35 crc kubenswrapper[5048]: I1213 06:29:35.054769 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 13 06:29:35 crc kubenswrapper[5048]: I1213 06:29:35.054797 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 13 06:29:35 crc kubenswrapper[5048]: I1213 06:29:35.054941 5048 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Dec 13 06:29:35 crc kubenswrapper[5048]: I1213 06:29:35.055042 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 13 06:29:35 crc kubenswrapper[5048]: I1213 06:29:35.055966 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:29:35 crc kubenswrapper[5048]: I1213 06:29:35.055997 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:29:35 crc kubenswrapper[5048]: I1213 06:29:35.056011 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:29:35 crc kubenswrapper[5048]: I1213 06:29:35.056150 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:29:35 crc kubenswrapper[5048]: I1213 06:29:35.056186 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:29:35 crc kubenswrapper[5048]: I1213 06:29:35.056198 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:29:35 crc kubenswrapper[5048]: I1213 06:29:35.057023 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:29:35 crc kubenswrapper[5048]: I1213 06:29:35.057059 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:29:35 crc kubenswrapper[5048]: I1213 06:29:35.057068 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:29:35 crc kubenswrapper[5048]: I1213 06:29:35.144071 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 13 06:29:36 crc kubenswrapper[5048]: I1213 06:29:36.057502 5048 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Dec 13 06:29:36 crc kubenswrapper[5048]: I1213 06:29:36.057573 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 13 06:29:36 crc kubenswrapper[5048]: I1213 06:29:36.057632 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 13 06:29:36 crc kubenswrapper[5048]: I1213 06:29:36.057574 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 13 06:29:36 crc kubenswrapper[5048]: I1213 06:29:36.059279 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:29:36 crc kubenswrapper[5048]: I1213 06:29:36.059322 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:29:36 crc kubenswrapper[5048]: I1213 06:29:36.059319 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:29:36 crc kubenswrapper[5048]: I1213 06:29:36.059366 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:29:36 crc kubenswrapper[5048]: I1213 06:29:36.059381 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:29:36 crc kubenswrapper[5048]: I1213 06:29:36.059383 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:29:36 crc kubenswrapper[5048]: I1213 06:29:36.059419 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:29:36 crc kubenswrapper[5048]: I1213 06:29:36.059453 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:29:36 crc kubenswrapper[5048]: I1213 06:29:36.059334 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:29:36 crc kubenswrapper[5048]: E1213 06:29:36.681424 5048 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Dec 13 06:29:37 crc kubenswrapper[5048]: I1213 06:29:37.042536 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 13 06:29:37 crc kubenswrapper[5048]: I1213 06:29:37.059905 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 13 06:29:37 crc kubenswrapper[5048]: I1213 06:29:37.061028 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:29:37 crc kubenswrapper[5048]: I1213 06:29:37.061371 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:29:37 crc kubenswrapper[5048]: I1213 06:29:37.061488 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:29:39 crc kubenswrapper[5048]: I1213 06:29:39.756590 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc"
Dec 13 06:29:39 crc kubenswrapper[5048]: I1213 06:29:39.756929 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 13 06:29:39 crc kubenswrapper[5048]: I1213 06:29:39.759317 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:29:39 crc kubenswrapper[5048]: I1213 06:29:39.759376 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:29:39 crc kubenswrapper[5048]: I1213 06:29:39.759396 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:29:39 crc kubenswrapper[5048]: I1213 06:29:39.862804 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 13 06:29:39 crc kubenswrapper[5048]: I1213 06:29:39.863166 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 13 06:29:39 crc kubenswrapper[5048]: I1213 06:29:39.864846 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:29:39 crc kubenswrapper[5048]: I1213 06:29:39.864884 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:29:39 crc kubenswrapper[5048]: I1213 06:29:39.864894 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:29:39 crc kubenswrapper[5048]: I1213 06:29:39.868220 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 13 06:29:40 crc kubenswrapper[5048]: I1213 06:29:40.069476 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 13 06:29:40 crc kubenswrapper[5048]: I1213 06:29:40.070831 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:29:40 crc kubenswrapper[5048]: I1213 06:29:40.070882 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:29:40 crc kubenswrapper[5048]: I1213 06:29:40.070895 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:29:40 crc kubenswrapper[5048]: W1213 06:29:40.689230 5048 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout
Dec 13 06:29:40 crc kubenswrapper[5048]: I1213 06:29:40.689370 5048 trace.go:236] Trace[1016324397]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (13-Dec-2025 06:29:30.687) (total time: 10002ms):
Dec 13 06:29:40 crc kubenswrapper[5048]: Trace[1016324397]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (06:29:40.689)
Dec 13 06:29:40 crc kubenswrapper[5048]: Trace[1016324397]: [10.002048679s] [10.002048679s] END
Dec 13 06:29:40 crc kubenswrapper[5048]: E1213 06:29:40.689401 5048 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError"
Dec 13 06:29:40 crc kubenswrapper[5048]: W1213 06:29:40.710921 5048 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout
Dec 13 06:29:40 crc kubenswrapper[5048]: I1213 06:29:40.711081 5048 trace.go:236] Trace[668291387]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (13-Dec-2025 06:29:30.708) (total time: 10002ms):
Dec 13 06:29:40 crc kubenswrapper[5048]: Trace[668291387]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10002ms (06:29:40.710)
Dec 13 06:29:40 crc kubenswrapper[5048]: Trace[668291387]: [10.002776161s] [10.002776161s] END
Dec 13 06:29:40 crc kubenswrapper[5048]: E1213 06:29:40.711114 5048 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError"
Dec 13 06:29:41 crc kubenswrapper[5048]: I1213 06:29:41.409286 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc"
Dec 13 06:29:41 crc kubenswrapper[5048]: I1213 06:29:41.409574 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 13 06:29:41 crc kubenswrapper[5048]: I1213 06:29:41.411090 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:29:41 crc kubenswrapper[5048]: I1213 06:29:41.411170 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:29:41 crc kubenswrapper[5048]: I1213 06:29:41.411195 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:29:41 crc kubenswrapper[5048]: I1213 06:29:41.507136 5048 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout
Dec 13 06:29:42 crc kubenswrapper[5048]: E1213 06:29:42.714480 5048 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" interval="6.4s"
Dec 13 06:29:42 crc kubenswrapper[5048]: I1213 06:29:42.862951 5048 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Dec 13 06:29:42 crc kubenswrapper[5048]: I1213 06:29:42.863043 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Dec 13 06:29:42 crc kubenswrapper[5048]: I1213 06:29:42.899560 5048 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Dec 13 06:29:42 crc kubenswrapper[5048]: I1213 06:29:42.899636 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Dec 13 06:29:42 crc kubenswrapper[5048]: I1213 06:29:42.907676 5048 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Dec 13 06:29:42 crc kubenswrapper[5048]: [+]log ok
Dec 13 06:29:42 crc kubenswrapper[5048]: [+]etcd ok
Dec 13 06:29:42 crc kubenswrapper[5048]: [+]poststarthook/openshift.io-api-request-count-filter ok
Dec 13 06:29:42 crc kubenswrapper[5048]: [+]poststarthook/openshift.io-startkubeinformers ok
Dec 13 06:29:42 crc kubenswrapper[5048]: [+]poststarthook/openshift.io-openshift-apiserver-reachable ok
Dec 13 06:29:42 crc kubenswrapper[5048]: [+]poststarthook/openshift.io-oauth-apiserver-reachable ok
Dec 13 06:29:42 crc kubenswrapper[5048]: [+]poststarthook/start-apiserver-admission-initializer ok
Dec 13 06:29:42 crc kubenswrapper[5048]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Dec 13 06:29:42 crc kubenswrapper[5048]: [+]poststarthook/generic-apiserver-start-informers ok
Dec 13 06:29:42 crc kubenswrapper[5048]: [+]poststarthook/priority-and-fairness-config-consumer ok
Dec 13 06:29:42 crc kubenswrapper[5048]: [+]poststarthook/priority-and-fairness-filter ok
Dec 13 06:29:42 crc kubenswrapper[5048]: [+]poststarthook/storage-object-count-tracker-hook ok
Dec 13 06:29:42 crc kubenswrapper[5048]: [+]poststarthook/start-apiextensions-informers ok
Dec 13 06:29:42 crc kubenswrapper[5048]: [-]poststarthook/start-apiextensions-controllers failed: reason withheld
Dec 13 06:29:42 crc kubenswrapper[5048]: [-]poststarthook/crd-informer-synced failed: reason withheld
Dec 13 06:29:42 crc kubenswrapper[5048]: [+]poststarthook/start-system-namespaces-controller ok
Dec 13 06:29:42 crc kubenswrapper[5048]: [+]poststarthook/start-cluster-authentication-info-controller ok
Dec 13 06:29:42 crc kubenswrapper[5048]: [+]poststarthook/start-kube-apiserver-identity-lease-controller ok
Dec 13 06:29:42 crc kubenswrapper[5048]: [+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
Dec 13 06:29:42 crc kubenswrapper[5048]: [+]poststarthook/start-legacy-token-tracking-controller ok
Dec 13 06:29:42 crc kubenswrapper[5048]: [+]poststarthook/start-service-ip-repair-controllers ok
Dec 13 06:29:42 crc kubenswrapper[5048]: [-]poststarthook/rbac/bootstrap-roles failed: reason withheld
Dec 13 06:29:42 crc kubenswrapper[5048]: [-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
Dec 13 06:29:42 crc kubenswrapper[5048]: [-]poststarthook/priority-and-fairness-config-producer failed: reason withheld
Dec 13 06:29:42 crc kubenswrapper[5048]: [-]poststarthook/bootstrap-controller failed: reason withheld
Dec 13 06:29:42 crc kubenswrapper[5048]: [+]poststarthook/aggregator-reload-proxy-client-cert ok
Dec 13 06:29:42 crc kubenswrapper[5048]: [+]poststarthook/start-kube-aggregator-informers ok
Dec 13 06:29:42 crc kubenswrapper[5048]: [+]poststarthook/apiservice-status-local-available-controller ok
Dec 13 06:29:42 crc kubenswrapper[5048]: [+]poststarthook/apiservice-status-remote-available-controller ok
Dec 13 06:29:42 crc kubenswrapper[5048]: [-]poststarthook/apiservice-registration-controller failed: reason withheld
Dec 13 06:29:42 crc kubenswrapper[5048]: [+]poststarthook/apiservice-wait-for-first-sync ok
Dec 13 06:29:42 crc kubenswrapper[5048]: [-]poststarthook/apiservice-discovery-controller failed: reason withheld
Dec 13 06:29:42 crc kubenswrapper[5048]: [+]poststarthook/kube-apiserver-autoregistration ok
Dec 13 06:29:42 crc kubenswrapper[5048]: [+]autoregister-completion ok
Dec 13 06:29:42 crc kubenswrapper[5048]: [+]poststarthook/apiservice-openapi-controller ok
Dec 13 06:29:42 crc kubenswrapper[5048]: [+]poststarthook/apiservice-openapiv3-controller ok
Dec 13 06:29:42 crc kubenswrapper[5048]: livez check failed
Dec 13 06:29:42 crc kubenswrapper[5048]: I1213 06:29:42.907805 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 13 06:29:45 crc kubenswrapper[5048]: I1213 06:29:45.149342 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 13 06:29:45 crc kubenswrapper[5048]: I1213 06:29:45.150633 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 13 06:29:45 crc kubenswrapper[5048]: I1213 06:29:45.151168 5048 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body=
Dec 13 06:29:45 crc kubenswrapper[5048]: I1213 06:29:45.151286 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused"
Dec 13 06:29:45 crc kubenswrapper[5048]: I1213 06:29:45.152072 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:29:45 crc kubenswrapper[5048]: I1213 06:29:45.152234 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:29:45 crc kubenswrapper[5048]: I1213 06:29:45.152307 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:29:45 crc kubenswrapper[5048]: I1213 06:29:45.158479 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.085862 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.087194 5048 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body=
Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.087270 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.087299 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.087312 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.087269 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused"
Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.288899 5048 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.416703 5048 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.490014 5048 apiserver.go:52] "Watching apiserver"
Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.500414 5048 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.500714 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf"]
Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.501150 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.501219 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb"
Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.501260 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.501275 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.501284 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h"
Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.501623 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 13 06:29:46 crc kubenswrapper[5048]: E1213 06:29:46.501859 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 13 06:29:46 crc kubenswrapper[5048]: E1213 06:29:46.501933 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 13 06:29:46 crc kubenswrapper[5048]: E1213 06:29:46.501961 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.504233 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.504300 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.504329 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.505220 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.505925 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.506220 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.506329 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.507211 5048 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.507616 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.510926 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.530823 5048 status_manager.go:875] "Failed to update status for pod"
pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.547685 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.565815 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.580254 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.595202 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.610991 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.626832 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.643841 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.660494 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.672720 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.687950 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.701033 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 13 06:29:46 crc kubenswrapper[5048]: I1213 06:29:46.717582 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.043395 5048 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.043666 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.887357 5048 trace.go:236] Trace[1392887468]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (13-Dec-2025 06:29:34.250) (total time: 13636ms): Dec 13 06:29:47 crc kubenswrapper[5048]: Trace[1392887468]: ---"Objects listed" error: 13636ms (06:29:47.887) Dec 13 06:29:47 crc kubenswrapper[5048]: Trace[1392887468]: [13.636513378s] [13.636513378s] END Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.887404 5048 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Dec 13 06:29:47 crc kubenswrapper[5048]: E1213 06:29:47.888713 5048 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.888937 5048 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.890747 5048 trace.go:236] Trace[1062315669]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (13-Dec-2025 06:29:35.383) (total time: 12507ms): Dec 13 06:29:47 crc kubenswrapper[5048]: Trace[1062315669]: ---"Objects listed" error: 12507ms (06:29:47.890) Dec 13 06:29:47 crc kubenswrapper[5048]: Trace[1062315669]: [12.507080314s] [12.507080314s] END Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.890772 5048 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.989664 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: 
\"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.989749 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.989807 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.989845 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.989898 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.989951 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.990113 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.990349 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.990379 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.990412 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.991065 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.991139 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.991173 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.991203 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.991245 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.991286 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.991330 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.991364 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.991397 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: 
\"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.991457 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.991488 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.998494 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.997764 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.998175 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.998628 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.998638 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.998716 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.998752 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.998849 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.998897 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.998937 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.999063 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.999338 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.999383 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.999758 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:47 crc kubenswrapper[5048]: I1213 06:29:47.999846 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:47 crc kubenswrapper[5048]: E1213 06:29:47.999919 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:29:48.499883079 +0000 UTC m=+22.366477650 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:47.999765 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:47.999992 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.000018 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.000165 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.000721 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.000768 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.001002 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.001243 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.001011 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.001094 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.001626 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.002308 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.002923 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.002968 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.005516 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.005509 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.005731 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.005856 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.006151 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.006062 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.006061 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.006235 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.006464 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.006580 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.006689 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.006789 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.006923 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.007030 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.007147 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.007264 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.007379 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.007511 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod 
\"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.007624 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.007821 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.007933 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.008035 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.008132 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.008227 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.008335 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.006596 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.006982 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.007286 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.007807 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.007921 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.008242 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.008325 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.008375 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.008662 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.008399 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.008459 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.008768 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.008794 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.008874 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.008897 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.008919 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.008940 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.008936 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.008960 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.009025 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.009095 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.009121 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.009146 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.009165 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.009186 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.009204 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.009222 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.009240 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") 
pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.009259 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.009278 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.009298 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.009326 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.009454 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.009477 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.009497 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.009520 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.009542 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.009558 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" 
(OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.009570 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.009567 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.009591 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.009614 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.009634 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.009652 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.009672 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.009690 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.009706 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 
06:29:48.009733 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.009750 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.009768 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.009788 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.009786 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.009808 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.009880 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.009918 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.009956 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.009984 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010016 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010044 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010070 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010094 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010118 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010140 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010172 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: 
\"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010207 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010228 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010245 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010262 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010279 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010294 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010311 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010326 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010343 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010369 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010388 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010408 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010455 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010487 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010515 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010548 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010568 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010586 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010606 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010629 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010651 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010676 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010701 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010728 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010752 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010779 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010797 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010813 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010867 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010896 5048 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010920 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010945 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010966 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.010995 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.011019 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.011043 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.011069 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.011092 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.011115 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.011139 5048 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.011161 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.011167 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.011191 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.011236 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.011261 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.011287 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.011365 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.011392 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.011416 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.011464 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.011490 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.011513 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.011537 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.011560 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.011589 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.011613 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.011636 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.011657 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.011685 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.011716 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.011740 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.011766 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.011789 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.011814 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.017768 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.017795 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.017825 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.017857 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.017883 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.017912 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.017941 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.017968 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.017987 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.018008 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.018034 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.018055 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.018075 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.018093 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.018112 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.018130 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.018147 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.018379 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.018400 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.018419 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.018494 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.018594 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.018607 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.018655 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.018746 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.018758 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.018784 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.019101 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.019427 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.019537 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.019807 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.019945 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.019979 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.020181 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.020194 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.020569 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.020700 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.021191 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.021368 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.021553 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.021659 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.021898 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.022361 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.022398 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.022499 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.022537 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.022572 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.022576 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.022939 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.023004 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.023034 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.023057 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.023263 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.023291 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.023478 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.023730 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.023773 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.023784 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.023925 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.024227 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.024309 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.023844 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.024613 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.024633 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.024676 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.024707 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.024740 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.024768 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.024795 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.024826 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.024822 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.024857 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.024888 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.024920 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.024945 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.024973 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.024996 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.025023 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.025049 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.025023 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.025108 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.025144 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.025171 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.025200 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.025229 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.025266 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.025294 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.025320 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.025349 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.025379 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.025411 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.025458 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.025486 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.025491 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.025575 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.025788 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.025807 5048 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.025826 5048 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.025842 5048 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.025858 5048 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.025873 5048 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.025887 5048 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.025907 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.025921 5048 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.025938 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.025949 5048 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.025959 5048 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.025959 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.025971 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.025969 5048 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.025997 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.026051 5048 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.026077 5048 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.026097 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.026118 5048 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.026135 5048 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.026051 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.026106 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.026313 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.026419 5048 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.026490 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.026500 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.026511 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.026675 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.027020 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.027032 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.026736 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.027188 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.026711 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.026687 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.027292 5048 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.027315 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.027331 5048 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.027356 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.027373 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.027388 5048 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.027409 5048 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.027426 5048 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.027458 5048 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.027473 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.027491 5048 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.027636 5048 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.027720 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.027741 5048 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.027755 5048 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.027768 5048 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.027782 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.027793 5048 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.027804 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.027816 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.027828 5048 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.027838 5048 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.027848 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.027860 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.027870 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.027879 5048 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.027890 5048 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.027900 5048 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.027911 5048 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.027921 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.028733 5048 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.028750 5048 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.028766 5048 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.028778 5048 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.028792 5048 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.028807 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.028824 5048 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.028836 5048 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.028848 5048 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.028859 5048 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.028870 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.028880 5048 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.028891 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.028904 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.028916 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.028927 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.028937 5048 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.028947 5048 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\""
Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.028957 5048 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName:
\"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.028968 5048 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.028978 5048 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.028988 5048 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.028998 5048 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.029006 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.029017 5048 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.029026 5048 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.029036 5048 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.029047 5048 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.029057 5048 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.029068 5048 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.029078 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.029090 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" 
(UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.029101 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.029112 5048 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.029121 5048 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.029130 5048 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.029140 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.027513 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.027517 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.029165 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.027588 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.028009 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.028039 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.028264 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.029225 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.029489 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.029589 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.029803 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: E1213 06:29:48.029970 5048 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 13 06:29:48 crc kubenswrapper[5048]: E1213 06:29:48.030038 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-13 06:29:48.530016943 +0000 UTC m=+22.396611524 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.030075 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.026503 5048 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.030221 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.030533 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.030540 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.030663 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.030722 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.030964 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.031209 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.031311 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.031645 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.031864 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.031939 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.032273 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.032540 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.035821 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.036277 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.037076 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 13 06:29:48 crc kubenswrapper[5048]: E1213 06:29:48.037166 5048 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 13 06:29:48 crc kubenswrapper[5048]: E1213 06:29:48.037759 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-13 06:29:48.537735053 +0000 UTC m=+22.404329634 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.044208 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: E1213 06:29:48.045049 5048 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 13 06:29:48 crc kubenswrapper[5048]: E1213 06:29:48.045136 5048 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 13 06:29:48 crc kubenswrapper[5048]: E1213 06:29:48.045214 5048 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 13 06:29:48 crc kubenswrapper[5048]: E1213 06:29:48.046596 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-13 06:29:48.546571935 +0000 UTC m=+22.413166516 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.045519 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.046484 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.049595 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.052509 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.052724 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.053080 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.052797 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.053167 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.053346 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.053382 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.053548 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.054191 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.054308 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.054842 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.054967 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.055762 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.055804 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.056124 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.056381 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.056778 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.057102 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.057272 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.057469 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.057502 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.057739 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.057872 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.057898 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.057976 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.058249 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.058491 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.058565 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: E1213 06:29:48.058734 5048 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 13 06:29:48 crc kubenswrapper[5048]: E1213 06:29:48.058759 5048 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 13 06:29:48 crc kubenswrapper[5048]: E1213 06:29:48.058773 5048 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 13 06:29:48 crc kubenswrapper[5048]: E1213 06:29:48.058920 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-13 06:29:48.558809279 +0000 UTC m=+22.425403860 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.059004 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.059012 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.059293 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.063902 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.063999 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.067770 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.068502 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.068517 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.068675 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.069527 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.069871 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.070643 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.071209 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.071320 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.071351 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.071269 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.071412 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.073604 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.073980 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.075492 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.076341 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.076516 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.077449 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.077536 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.077484 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.077593 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.082848 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.083001 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.083208 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.083378 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.083672 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.083674 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.083817 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.084664 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.085203 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.085691 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.086783 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.087048 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.112284 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.112758 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.130788 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.130854 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.130937 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.131102 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.131586 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.131337 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.131764 5048 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.131786 5048 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.131825 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.131841 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.131865 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on 
node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.131899 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.131915 5048 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.131927 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.131938 5048 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.131950 5048 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.131982 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.131994 5048 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132005 5048 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132018 5048 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132029 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132061 5048 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132072 5048 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132083 5048 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: 
I1213 06:29:48.132094 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132106 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132138 5048 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132152 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132166 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132181 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132216 5048 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132232 5048 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132245 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132259 5048 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132300 5048 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132339 5048 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132351 5048 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132363 5048 
reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132375 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132386 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132419 5048 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132454 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132466 5048 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132480 5048 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132493 5048 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132506 5048 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132518 5048 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132536 5048 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132550 5048 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132562 5048 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132574 5048 
reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132585 5048 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132596 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132608 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132619 5048 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132631 5048 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132642 5048 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132653 5048 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132665 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132677 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132690 5048 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132702 5048 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132726 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132739 5048 reconciler_common.go:293] "Volume detached 
for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132753 5048 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132764 5048 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132775 5048 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132786 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132797 5048 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132807 5048 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132820 5048 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132832 5048 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132844 5048 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132855 5048 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132866 5048 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132880 5048 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132892 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" 
(UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132904 5048 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132916 5048 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132927 5048 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132937 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132947 5048 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132957 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132969 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132978 5048 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132989 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.132999 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.133009 5048 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.133019 5048 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.133029 5048 reconciler_common.go:293] "Volume detached for volume 
\"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.133043 5048 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.133054 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.133067 5048 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.133078 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.133092 5048 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.133102 5048 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.133111 5048 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.133121 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.133132 5048 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.133142 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.133153 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.133164 5048 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.133174 5048 
reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.133184 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.133196 5048 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.133207 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.133221 5048 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.133232 5048 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.133243 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.316130 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.328586 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.333140 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 13 06:29:48 crc kubenswrapper[5048]: W1213 06:29:48.333490 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-fd3b3a373c7085cfd0dde272dda7c575d5d1720fe8f16f2eb00302f05ca326a9 WatchSource:0}: Error finding container fd3b3a373c7085cfd0dde272dda7c575d5d1720fe8f16f2eb00302f05ca326a9: Status 404 returned error can't find the container with id fd3b3a373c7085cfd0dde272dda7c575d5d1720fe8f16f2eb00302f05ca326a9 Dec 13 06:29:48 crc kubenswrapper[5048]: W1213 06:29:48.350766 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-a313a6e00bbb6d537d3d643a719dd6a05b70c9b0bfa1cdcedfb4e1bbdc3e7bb9 WatchSource:0}: Error finding container a313a6e00bbb6d537d3d643a719dd6a05b70c9b0bfa1cdcedfb4e1bbdc3e7bb9: Status 404 returned error can't find the container with id a313a6e00bbb6d537d3d643a719dd6a05b70c9b0bfa1cdcedfb4e1bbdc3e7bb9 Dec 13 06:29:48 crc kubenswrapper[5048]: W1213 06:29:48.355650 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-b8a52e7a8aa60f5180ad9292b11c2294da28764bee7013d08f6763088e039f2e WatchSource:0}: Error finding container b8a52e7a8aa60f5180ad9292b11c2294da28764bee7013d08f6763088e039f2e: Status 404 returned error can't find the container with id b8a52e7a8aa60f5180ad9292b11c2294da28764bee7013d08f6763088e039f2e Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.536166 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.536793 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:29:48 crc kubenswrapper[5048]: E1213 06:29:48.536900 5048 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 13 06:29:48 crc kubenswrapper[5048]: E1213 06:29:48.536977 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-13 06:29:49.536953612 +0000 UTC m=+23.403548193 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 13 06:29:48 crc kubenswrapper[5048]: E1213 06:29:48.537178 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:29:49.537124236 +0000 UTC m=+23.403718957 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.566696 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:29:48 crc kubenswrapper[5048]: E1213 06:29:48.566890 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.566709 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:29:48 crc kubenswrapper[5048]: E1213 06:29:48.567001 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.567593 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:29:48 crc kubenswrapper[5048]: E1213 06:29:48.567807 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
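
The two failures just above are startup-ordering artifacts rather than permanent faults: the nginx-conf mount fails because the configmap has not yet been registered with the kubelet's configmap manager after the restart, and the CSI teardown fails because the kubevirt.io.hostpath-provisioner driver has not yet re-registered. Both operations are parked by nestedpendingoperations with a backoff ("No retries permitted until ... durationBeforeRetry 1s") and retried, as are the kube-api-access-* projected volumes further down. A sketch that pulls every such backoff out of the log (same kubelet.log and one-record-per-line assumptions as above):

    import re

    # Matches the backoff lines from nestedpendingoperations.go, e.g.:
    #   ... failed. No retries permitted until 2025-12-13 06:29:49.536953612 +0000 UTC
    #   m=+23.403548193 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" ...
    BACKOFF = re.compile(
        r'No retries permitted until (?P<until>\d{4}-\d{2}-\d{2} [\d:.]+).*?'
        r'Error: (?P<op>\w+\.\w+) failed for volume "(?P<vol>[^"]+)"'
    )

    with open("kubelet.log", encoding="utf-8", errors="replace") as f:
        for record in f:
            m = BACKOFF.search(record)
            if m:
                print(f'{m.group("op")} {m.group("vol")!r}: no retry before {m.group("until")}')

A volume that keeps reappearing with growing backoff intervals is worth investigating; one or two entries per volume during startup, as seen here, usually clear once the informers sync and the CSI driver re-registers.
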
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.571071 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.574443 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.575391 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.577379 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.578739 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.581182 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.582240 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.583811 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.585401 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.586697 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.587201 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.588769 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.589602 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.590907 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" 
path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.592714 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.593339 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.594756 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.595942 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.596782 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.598942 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.599734 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.600372 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.600906 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.602260 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.602788 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.604041 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.604784 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.605829 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" 
path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.606588 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.607796 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.608552 5048 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.608876 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.612582 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.614064 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.614711 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.616809 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.618260 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.619185 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.620496 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.621265 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.623270 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.624109 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" 
path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.625850 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.626703 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.627303 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.628052 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.630060 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.631346 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.632101 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.632972 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.633649 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.634974 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.635743 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.636358 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.637393 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:29:48 crc 
kubenswrapper[5048]: I1213 06:29:48.637477 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:29:48 crc kubenswrapper[5048]: I1213 06:29:48.637533 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:29:48 crc kubenswrapper[5048]: E1213 06:29:48.637718 5048 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 13 06:29:48 crc kubenswrapper[5048]: E1213 06:29:48.637820 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-13 06:29:49.637800777 +0000 UTC m=+23.504395358 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 13 06:29:48 crc kubenswrapper[5048]: E1213 06:29:48.637956 5048 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 13 06:29:48 crc kubenswrapper[5048]: E1213 06:29:48.637977 5048 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 13 06:29:48 crc kubenswrapper[5048]: E1213 06:29:48.638031 5048 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 13 06:29:48 crc kubenswrapper[5048]: E1213 06:29:48.638105 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-13 06:29:49.638095564 +0000 UTC m=+23.504690145 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 13 06:29:48 crc kubenswrapper[5048]: E1213 06:29:48.638177 5048 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 13 06:29:48 crc kubenswrapper[5048]: E1213 06:29:48.638188 5048 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 13 06:29:48 crc kubenswrapper[5048]: E1213 06:29:48.638196 5048 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 13 06:29:48 crc kubenswrapper[5048]: E1213 06:29:48.638221 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-13 06:29:49.638214178 +0000 UTC m=+23.504808759 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.098805 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1"} Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.098896 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"a313a6e00bbb6d537d3d643a719dd6a05b70c9b0bfa1cdcedfb4e1bbdc3e7bb9"} Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.102296 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6"} Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.102363 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126"} Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.102373 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"fd3b3a373c7085cfd0dde272dda7c575d5d1720fe8f16f2eb00302f05ca326a9"} Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.104458 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.106639 5048 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f" exitCode=255 Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.106721 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f"} Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.112193 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"b8a52e7a8aa60f5180ad9292b11c2294da28764bee7013d08f6763088e039f2e"} Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.124755 5048 scope.go:117] "RemoveContainer" containerID="7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.137670 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.140379 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:49Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.171255 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:49Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.212756 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:49Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.298691 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:49Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.346911 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:49Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.371450 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:49Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.395195 5048 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:49Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.426148 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:49Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.463825 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:49Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.495685 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:49Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.496403 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-9qddb"] Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.496841 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-9qddb" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.513164 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.513213 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.518554 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.518678 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.525343 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:49Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.553790 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:49Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.570039 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13
T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:49Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.575613 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.575726 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9t2bz\" (UniqueName: \"kubernetes.io/projected/ef455846-1e4c-4fc3-b6a7-de24f53ad9fa-kube-api-access-9t2bz\") pod \"node-ca-9qddb\" (UID: \"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\") " pod="openshift-image-registry/node-ca-9qddb" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.575799 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") 
pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.575826 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ef455846-1e4c-4fc3-b6a7-de24f53ad9fa-host\") pod \"node-ca-9qddb\" (UID: \"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\") " pod="openshift-image-registry/node-ca-9qddb" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.575846 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/ef455846-1e4c-4fc3-b6a7-de24f53ad9fa-serviceca\") pod \"node-ca-9qddb\" (UID: \"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\") " pod="openshift-image-registry/node-ca-9qddb" Dec 13 06:29:49 crc kubenswrapper[5048]: E1213 06:29:49.576023 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:29:51.575992756 +0000 UTC m=+25.442587337 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:29:49 crc kubenswrapper[5048]: E1213 06:29:49.576127 5048 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 13 06:29:49 crc kubenswrapper[5048]: E1213 06:29:49.576179 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-13 06:29:51.576169921 +0000 UTC m=+25.442764502 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.582213 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-kkfct"] Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.582757 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-kkfct" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.585740 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.585767 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.586684 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.597367 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:49Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.617472 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:49Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.635755 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:49Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.670374 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:49Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.676688 5048 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.676743 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9t2bz\" (UniqueName: \"kubernetes.io/projected/ef455846-1e4c-4fc3-b6a7-de24f53ad9fa-kube-api-access-9t2bz\") pod \"node-ca-9qddb\" (UID: \"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\") " pod="openshift-image-registry/node-ca-9qddb" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.676767 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-66ldb\" (UniqueName: \"kubernetes.io/projected/262dc4a8-4ed0-49a1-be9e-52071ce3b6b7-kube-api-access-66ldb\") pod \"node-resolver-kkfct\" (UID: \"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\") " pod="openshift-dns/node-resolver-kkfct" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.676791 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/262dc4a8-4ed0-49a1-be9e-52071ce3b6b7-hosts-file\") pod \"node-resolver-kkfct\" (UID: \"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\") " pod="openshift-dns/node-resolver-kkfct" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.676813 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.676834 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.676857 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ef455846-1e4c-4fc3-b6a7-de24f53ad9fa-host\") pod \"node-ca-9qddb\" (UID: \"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\") " pod="openshift-image-registry/node-ca-9qddb" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.676877 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/ef455846-1e4c-4fc3-b6a7-de24f53ad9fa-serviceca\") pod \"node-ca-9qddb\" (UID: \"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\") " pod="openshift-image-registry/node-ca-9qddb" Dec 13 06:29:49 crc kubenswrapper[5048]: E1213 06:29:49.677332 5048 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 13 06:29:49 crc kubenswrapper[5048]: E1213 06:29:49.677356 5048 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object 
"openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.677427 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ef455846-1e4c-4fc3-b6a7-de24f53ad9fa-host\") pod \"node-ca-9qddb\" (UID: \"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\") " pod="openshift-image-registry/node-ca-9qddb" Dec 13 06:29:49 crc kubenswrapper[5048]: E1213 06:29:49.677464 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-13 06:29:51.677416136 +0000 UTC m=+25.544010717 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 13 06:29:49 crc kubenswrapper[5048]: E1213 06:29:49.677469 5048 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 13 06:29:49 crc kubenswrapper[5048]: E1213 06:29:49.677600 5048 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 13 06:29:49 crc kubenswrapper[5048]: E1213 06:29:49.677717 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-13 06:29:51.677674914 +0000 UTC m=+25.544269685 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 13 06:29:49 crc kubenswrapper[5048]: E1213 06:29:49.677934 5048 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 13 06:29:49 crc kubenswrapper[5048]: E1213 06:29:49.677975 5048 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 13 06:29:49 crc kubenswrapper[5048]: E1213 06:29:49.677996 5048 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 13 06:29:49 crc kubenswrapper[5048]: E1213 06:29:49.678061 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-13 06:29:51.678044464 +0000 UTC m=+25.544639225 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.678481 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/ef455846-1e4c-4fc3-b6a7-de24f53ad9fa-serviceca\") pod \"node-ca-9qddb\" (UID: \"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\") " pod="openshift-image-registry/node-ca-9qddb" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.705809 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:49Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.708526 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9t2bz\" (UniqueName: \"kubernetes.io/projected/ef455846-1e4c-4fc3-b6a7-de24f53ad9fa-kube-api-access-9t2bz\") pod \"node-ca-9qddb\" (UID: \"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\") " pod="openshift-image-registry/node-ca-9qddb" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.778015 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-66ldb\" (UniqueName: \"kubernetes.io/projected/262dc4a8-4ed0-49a1-be9e-52071ce3b6b7-kube-api-access-66ldb\") pod \"node-resolver-kkfct\" (UID: \"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\") " pod="openshift-dns/node-resolver-kkfct" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.778317 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/262dc4a8-4ed0-49a1-be9e-52071ce3b6b7-hosts-file\") pod \"node-resolver-kkfct\" (UID: \"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\") " pod="openshift-dns/node-resolver-kkfct" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.778702 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/262dc4a8-4ed0-49a1-be9e-52071ce3b6b7-hosts-file\") pod \"node-resolver-kkfct\" (UID: \"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\") " pod="openshift-dns/node-resolver-kkfct" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.792526 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:49Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.819185 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-9qddb" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.840238 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-66ldb\" (UniqueName: \"kubernetes.io/projected/262dc4a8-4ed0-49a1-be9e-52071ce3b6b7-kube-api-access-66ldb\") pod \"node-resolver-kkfct\" (UID: \"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\") " pod="openshift-dns/node-resolver-kkfct" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.881244 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:49Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.881926 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.896645 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-kkfct" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.898602 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.914955 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:49Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.947052 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.950667 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:49Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.968620 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:49Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.996593 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-j7hns"] Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.997202 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.997863 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-r42c6"] Dec 13 06:29:49 crc kubenswrapper[5048]: I1213 06:29:49.998348 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.002455 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.003009 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.003189 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.003417 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.003679 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.003950 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.004427 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-bdd78"] Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.004922 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.005218 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-bdd78" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.007033 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Dec 13 06:29:50 crc kubenswrapper[5048]: W1213 06:29:50.017388 5048 reflector.go:561] object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz": failed to list *v1.Secret: secrets "multus-ancillary-tools-dockercfg-vnmsz" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-multus": no relationship found between node 'crc' and this object Dec 13 06:29:50 crc kubenswrapper[5048]: E1213 06:29:50.017491 5048 reflector.go:158] "Unhandled Error" err="object-\"openshift-multus\"/\"multus-ancillary-tools-dockercfg-vnmsz\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"multus-ancillary-tools-dockercfg-vnmsz\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-multus\": no relationship found between node 'crc' and this object" logger="UnhandledError" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.017687 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.017795 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Dec 13 06:29:50 crc kubenswrapper[5048]: W1213 06:29:50.017651 5048 reflector.go:561] object-"openshift-multus"/"default-cni-sysctl-allowlist": failed to list *v1.ConfigMap: configmaps "default-cni-sysctl-allowlist" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-multus": no relationship found between node 'crc' and 
this object Dec 13 06:29:50 crc kubenswrapper[5048]: E1213 06:29:50.019725 5048 reflector.go:158] "Unhandled Error" err="object-\"openshift-multus\"/\"default-cni-sysctl-allowlist\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"default-cni-sysctl-allowlist\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-multus\": no relationship found between node 'crc' and this object" logger="UnhandledError" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.020808 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:50Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.038917 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:50Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.053762 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:50Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.071005 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:50Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.086702 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-whdm8\" (UniqueName: \"kubernetes.io/projected/fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b-kube-api-access-whdm8\") pod \"machine-config-daemon-j7hns\" (UID: \"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\") " pod="openshift-machine-config-operator/machine-config-daemon-j7hns" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.086757 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-host-run-netns\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.086793 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-host-var-lib-kubelet\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.086822 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: 
\"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-hostroot\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.086895 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-host-run-multus-certs\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.086951 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-host-var-lib-cni-multus\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.086975 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-os-release\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.086998 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-host-run-k8s-cni-cncf-io\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.087020 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-etc-kubernetes\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.087065 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-522qt\" (UniqueName: \"kubernetes.io/projected/8e1f99bb-aa5e-40eb-9b21-ff04d41acf50-kube-api-access-522qt\") pod \"multus-additional-cni-plugins-bdd78\" (UID: \"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\") " pod="openshift-multus/multus-additional-cni-plugins-bdd78" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.087091 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-system-cni-dir\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.087110 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-multus-socket-dir-parent\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.087128 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" 
(UniqueName: \"kubernetes.io/host-path/8e1f99bb-aa5e-40eb-9b21-ff04d41acf50-os-release\") pod \"multus-additional-cni-plugins-bdd78\" (UID: \"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\") " pod="openshift-multus/multus-additional-cni-plugins-bdd78" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.087154 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/8e1f99bb-aa5e-40eb-9b21-ff04d41acf50-system-cni-dir\") pod \"multus-additional-cni-plugins-bdd78\" (UID: \"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\") " pod="openshift-multus/multus-additional-cni-plugins-bdd78" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.087169 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-cnibin\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.087186 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/8e1f99bb-aa5e-40eb-9b21-ff04d41acf50-cnibin\") pod \"multus-additional-cni-plugins-bdd78\" (UID: \"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\") " pod="openshift-multus/multus-additional-cni-plugins-bdd78" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.087203 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/8e1f99bb-aa5e-40eb-9b21-ff04d41acf50-tuning-conf-dir\") pod \"multus-additional-cni-plugins-bdd78\" (UID: \"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\") " pod="openshift-multus/multus-additional-cni-plugins-bdd78" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.087223 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b-proxy-tls\") pod \"machine-config-daemon-j7hns\" (UID: \"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\") " pod="openshift-machine-config-operator/machine-config-daemon-j7hns" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.087240 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-multus-conf-dir\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.087258 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/8e1f99bb-aa5e-40eb-9b21-ff04d41acf50-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-bdd78\" (UID: \"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\") " pod="openshift-multus/multus-additional-cni-plugins-bdd78" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.087286 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/627477f3-8fca-4b40-ace9-68d22f6b8576-multus-daemon-config\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc 
kubenswrapper[5048]: I1213 06:29:50.087304 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b-rootfs\") pod \"machine-config-daemon-j7hns\" (UID: \"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\") " pod="openshift-machine-config-operator/machine-config-daemon-j7hns" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.087321 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ngfjz\" (UniqueName: \"kubernetes.io/projected/627477f3-8fca-4b40-ace9-68d22f6b8576-kube-api-access-ngfjz\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.087340 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b-mcd-auth-proxy-config\") pod \"machine-config-daemon-j7hns\" (UID: \"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\") " pod="openshift-machine-config-operator/machine-config-daemon-j7hns" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.087357 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/627477f3-8fca-4b40-ace9-68d22f6b8576-cni-binary-copy\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.087382 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-host-var-lib-cni-bin\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.087399 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/8e1f99bb-aa5e-40eb-9b21-ff04d41acf50-cni-binary-copy\") pod \"multus-additional-cni-plugins-bdd78\" (UID: \"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\") " pod="openshift-multus/multus-additional-cni-plugins-bdd78" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.087414 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-multus-cni-dir\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.091958 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers 
with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:50Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.114054 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:50Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.133832 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:50Z is 
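after 2025-08-24T17:21:41Z"

Every status patch in this stretch fails the same way: the kubelet's PATCH is intercepted by the pod.network-node-identity.openshift.io validating webhook at https://127.0.0.1:9743/pod, and the webhook's serving certificate expired on 2025-08-24T17:21:41Z, long before the node's current clock of 2025-12-13T06:29:50Z. A minimal Go sketch for confirming the expiry from the node, using only the standard library; the address and timestamps are taken from the log, and the program itself is illustrative, not kubelet code:

package main

import (
	"crypto/tls"
	"fmt"
	"time"
)

func main() {
	// Dial the webhook endpoint seen in the log. InsecureSkipVerify lets us
	// fetch the serving certificate even though normal verification fails.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	cert := conn.ConnectionState().PeerCertificates[0]
	fmt.Println("notBefore:", cert.NotBefore.Format(time.RFC3339))
	fmt.Println("notAfter: ", cert.NotAfter.Format(time.RFC3339))
	if time.Now().After(cert.NotAfter) {
		fmt.Println("expired: matches the x509 error in the log")
	}
}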
Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.134355 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-kkfct" event={"ID":"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7","Type":"ContainerStarted","Data":"4038f92e2830afd0aabe0e385299b40ba613d997ab00e22236a443f283525dc2"} Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.138647 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-9qddb" event={"ID":"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa","Type":"ContainerStarted","Data":"78fbeab6ae4fb0047d0bb45bb1c191f0d0191da36c02ce2a055878cd3caa6b21"} Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.150978 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.155561 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d"} Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.156427 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.160781 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status:
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13
T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:50Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.178851 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:50Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.194749 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-whdm8\" (UniqueName: \"kubernetes.io/projected/fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b-kube-api-access-whdm8\") pod \"machine-config-daemon-j7hns\" (UID: \"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\") " pod="openshift-machine-config-operator/machine-config-daemon-j7hns" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.194809 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-host-run-netns\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.194854 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-host-var-lib-kubelet\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.194878 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-hostroot\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.194908 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-host-run-multus-certs\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.194941 5048 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-host-var-lib-cni-multus\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.194964 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-522qt\" (UniqueName: \"kubernetes.io/projected/8e1f99bb-aa5e-40eb-9b21-ff04d41acf50-kube-api-access-522qt\") pod \"multus-additional-cni-plugins-bdd78\" (UID: \"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\") " pod="openshift-multus/multus-additional-cni-plugins-bdd78" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.194987 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-system-cni-dir\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.195015 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-os-release\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.195037 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-host-run-k8s-cni-cncf-io\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.195056 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-etc-kubernetes\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.195079 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/8e1f99bb-aa5e-40eb-9b21-ff04d41acf50-os-release\") pod \"multus-additional-cni-plugins-bdd78\" (UID: \"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\") " pod="openshift-multus/multus-additional-cni-plugins-bdd78" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.195102 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-multus-socket-dir-parent\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.195132 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/8e1f99bb-aa5e-40eb-9b21-ff04d41acf50-system-cni-dir\") pod \"multus-additional-cni-plugins-bdd78\" (UID: \"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\") " pod="openshift-multus/multus-additional-cni-plugins-bdd78" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.195152 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"cnibin\" (UniqueName: \"kubernetes.io/host-path/8e1f99bb-aa5e-40eb-9b21-ff04d41acf50-cnibin\") pod \"multus-additional-cni-plugins-bdd78\" (UID: \"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\") " pod="openshift-multus/multus-additional-cni-plugins-bdd78" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.195174 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-cnibin\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.195199 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/8e1f99bb-aa5e-40eb-9b21-ff04d41acf50-tuning-conf-dir\") pod \"multus-additional-cni-plugins-bdd78\" (UID: \"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\") " pod="openshift-multus/multus-additional-cni-plugins-bdd78" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.195221 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b-proxy-tls\") pod \"machine-config-daemon-j7hns\" (UID: \"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\") " pod="openshift-machine-config-operator/machine-config-daemon-j7hns" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.195245 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/8e1f99bb-aa5e-40eb-9b21-ff04d41acf50-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-bdd78\" (UID: \"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\") " pod="openshift-multus/multus-additional-cni-plugins-bdd78" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.195266 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-multus-conf-dir\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.195290 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/627477f3-8fca-4b40-ace9-68d22f6b8576-multus-daemon-config\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.195312 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b-rootfs\") pod \"machine-config-daemon-j7hns\" (UID: \"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\") " pod="openshift-machine-config-operator/machine-config-daemon-j7hns" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.195336 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ngfjz\" (UniqueName: \"kubernetes.io/projected/627477f3-8fca-4b40-ace9-68d22f6b8576-kube-api-access-ngfjz\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.195360 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: 
\"kubernetes.io/configmap/fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b-mcd-auth-proxy-config\") pod \"machine-config-daemon-j7hns\" (UID: \"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\") " pod="openshift-machine-config-operator/machine-config-daemon-j7hns" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.195385 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/627477f3-8fca-4b40-ace9-68d22f6b8576-cni-binary-copy\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.195406 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/8e1f99bb-aa5e-40eb-9b21-ff04d41acf50-cni-binary-copy\") pod \"multus-additional-cni-plugins-bdd78\" (UID: \"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\") " pod="openshift-multus/multus-additional-cni-plugins-bdd78" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.195430 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-multus-cni-dir\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.195470 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-host-var-lib-cni-bin\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.195581 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-host-var-lib-cni-bin\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.195992 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-host-run-netns\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.196027 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-host-var-lib-kubelet\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.196055 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-hostroot\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.196084 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-host-run-multus-certs\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " 
pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.196111 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-host-var-lib-cni-multus\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.196314 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-system-cni-dir\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.196774 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-os-release\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.196818 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-host-run-k8s-cni-cncf-io\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.196848 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-etc-kubernetes\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.196895 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/8e1f99bb-aa5e-40eb-9b21-ff04d41acf50-os-release\") pod \"multus-additional-cni-plugins-bdd78\" (UID: \"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\") " pod="openshift-multus/multus-additional-cni-plugins-bdd78" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.197088 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-multus-socket-dir-parent\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.197122 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/8e1f99bb-aa5e-40eb-9b21-ff04d41acf50-system-cni-dir\") pod \"multus-additional-cni-plugins-bdd78\" (UID: \"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\") " pod="openshift-multus/multus-additional-cni-plugins-bdd78" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.197155 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/8e1f99bb-aa5e-40eb-9b21-ff04d41acf50-cnibin\") pod \"multus-additional-cni-plugins-bdd78\" (UID: \"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\") " pod="openshift-multus/multus-additional-cni-plugins-bdd78" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.197201 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" 
(UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-cnibin\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.197381 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b-rootfs\") pod \"machine-config-daemon-j7hns\" (UID: \"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\") " pod="openshift-machine-config-operator/machine-config-daemon-j7hns" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.197446 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-multus-conf-dir\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.198499 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/627477f3-8fca-4b40-ace9-68d22f6b8576-multus-daemon-config\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.199006 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/627477f3-8fca-4b40-ace9-68d22f6b8576-cni-binary-copy\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.199887 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b-mcd-auth-proxy-config\") pod \"machine-config-daemon-j7hns\" (UID: \"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\") " pod="openshift-machine-config-operator/machine-config-daemon-j7hns" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.200673 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/8e1f99bb-aa5e-40eb-9b21-ff04d41acf50-cni-binary-copy\") pod \"multus-additional-cni-plugins-bdd78\" (UID: \"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\") " pod="openshift-multus/multus-additional-cni-plugins-bdd78" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.200728 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/627477f3-8fca-4b40-ace9-68d22f6b8576-multus-cni-dir\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.200824 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/8e1f99bb-aa5e-40eb-9b21-ff04d41acf50-tuning-conf-dir\") pod \"multus-additional-cni-plugins-bdd78\" (UID: \"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\") " pod="openshift-multus/multus-additional-cni-plugins-bdd78" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.204452 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b-proxy-tls\") pod \"machine-config-daemon-j7hns\" (UID: 
\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\") " pod="openshift-machine-config-operator/machine-config-daemon-j7hns" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.212856 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:50Z 
is after 2025-08-24T17:21:41Z" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.223154 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-whdm8\" (UniqueName: \"kubernetes.io/projected/fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b-kube-api-access-whdm8\") pod \"machine-config-daemon-j7hns\" (UID: \"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\") " pod="openshift-machine-config-operator/machine-config-daemon-j7hns" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.226627 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-522qt\" (UniqueName: \"kubernetes.io/projected/8e1f99bb-aa5e-40eb-9b21-ff04d41acf50-kube-api-access-522qt\") pod \"multus-additional-cni-plugins-bdd78\" (UID: \"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\") " pod="openshift-multus/multus-additional-cni-plugins-bdd78" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.232651 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ngfjz\" (UniqueName: \"kubernetes.io/projected/627477f3-8fca-4b40-ace9-68d22f6b8576-kube-api-access-ngfjz\") pod \"multus-r42c6\" (UID: \"627477f3-8fca-4b40-ace9-68d22f6b8576\") " pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.243298 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"rest
artCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:50Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.259098 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: 
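current time 2025-12-13T06:29:50Z is after 2025-08-24T17:21:41Z"

The patch payloads inside these "Failed to update status for pod" entries are strategic-merge patches printed with two levels of string escaping, which makes them hard to read. A small Go sketch for recovering readable JSON from a fragment copied verbatim out of the log: the uid below comes from the node-resolver-kkfct entry above, the body is abbreviated for the example, and the two strconv.Unquote calls mirror the two escaping levels:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"strconv"
)

func main() {
	// A patch fragment copied as it appears in the log, including the
	// surrounding escaped quotes; the status body is abbreviated here.
	frag := `\"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"}}\"`

	// Strip two escaping levels: wrap the copied text in quotes and unquote,
	// then unquote the resulting quoted string once more.
	s, err := strconv.Unquote(`"` + frag + `"`)
	if err != nil {
		panic(err)
	}
	s, err = strconv.Unquote(s)
	if err != nil {
		panic(err)
	}

	// Pretty-print the recovered JSON patch.
	var out bytes.Buffer
	if err := json.Indent(&out, []byte(s), "", "  "); err != nil {
		panic(err)
	}
	fmt.Println(out.String())
}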
current time 2025-12-13T06:29:50Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.286880 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:50Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.311876 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:50Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.323182 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.333527 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-r42c6" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.350325 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb
2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:50Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.409815 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:50Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.478032 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:50Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.478958 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-hfgcf"] Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.481611 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.489294 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.498075 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.501596 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.501895 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.508644 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.543752 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.575468 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.576111 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:29:50 crc kubenswrapper[5048]: E1213 06:29:50.576256 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.576928 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:29:50 crc kubenswrapper[5048]: E1213 06:29:50.577118 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:29:50 crc kubenswrapper[5048]: E1213 06:29:50.577671 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.586658 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.596774 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:50Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.609686 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-run-netns\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.609749 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-run-ovn-kubernetes\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.609783 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/caf986e7-b521-40fd-ae26-18716730d57d-env-overrides\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.609812 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-slash\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.609834 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-node-log\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.609854 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-log-socket\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.609921 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: 
\"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-cni-netd\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.609944 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-cni-bin\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.609972 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-systemd-units\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.609990 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-run-openvswitch\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.610015 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9blq5\" (UniqueName: \"kubernetes.io/projected/caf986e7-b521-40fd-ae26-18716730d57d-kube-api-access-9blq5\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.610042 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-run-systemd\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.610064 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-var-lib-openvswitch\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.610083 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-etc-openvswitch\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.610107 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 
06:29:50.610130 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-kubelet\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.610152 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/caf986e7-b521-40fd-ae26-18716730d57d-ovnkube-config\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.610176 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/caf986e7-b521-40fd-ae26-18716730d57d-ovn-node-metrics-cert\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.610210 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/caf986e7-b521-40fd-ae26-18716730d57d-ovnkube-script-lib\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.610232 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-run-ovn\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.681491 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:50Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.721158 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-systemd-units\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.721211 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-run-openvswitch\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.721234 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9blq5\" (UniqueName: \"kubernetes.io/projected/caf986e7-b521-40fd-ae26-18716730d57d-kube-api-access-9blq5\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.721253 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.721289 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-run-systemd\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.721387 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.721396 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-run-openvswitch\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.721453 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-var-lib-openvswitch\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.721513 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-var-lib-openvswitch\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.721525 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-etc-openvswitch\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.721557 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-kubelet\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.721579 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/caf986e7-b521-40fd-ae26-18716730d57d-ovnkube-config\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.721599 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/caf986e7-b521-40fd-ae26-18716730d57d-ovn-node-metrics-cert\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.721622 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/caf986e7-b521-40fd-ae26-18716730d57d-ovnkube-script-lib\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.721618 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-run-systemd\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.721679 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-run-ovn\") pod 
\"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.721709 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-etc-openvswitch\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.721733 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-kubelet\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.721653 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-run-ovn\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.721806 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-run-netns\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.721869 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-run-ovn-kubernetes\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.721905 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/caf986e7-b521-40fd-ae26-18716730d57d-env-overrides\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.722131 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-log-socket\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.722205 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-slash\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.722232 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-node-log\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc 
kubenswrapper[5048]: I1213 06:29:50.722327 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-cni-netd\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.722368 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-cni-bin\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.722544 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-cni-bin\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.722599 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-run-netns\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.722658 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-run-ovn-kubernetes\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.722742 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/caf986e7-b521-40fd-ae26-18716730d57d-ovnkube-config\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.722835 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-slash\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.722868 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-log-socket\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.722899 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-node-log\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.722929 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: 
\"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-cni-netd\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.723309 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-systemd-units\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.723698 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/caf986e7-b521-40fd-ae26-18716730d57d-ovnkube-script-lib\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.723882 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/caf986e7-b521-40fd-ae26-18716730d57d-env-overrides\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.736245 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/caf986e7-b521-40fd-ae26-18716730d57d-ovn-node-metrics-cert\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.756828 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9blq5\" (UniqueName: \"kubernetes.io/projected/caf986e7-b521-40fd-ae26-18716730d57d-kube-api-access-9blq5\") pod \"ovnkube-node-hfgcf\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.771266 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"
/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:50Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.794867 5048 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:50Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.834702 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:50Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.843998 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.855847 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:50Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.877582 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:50Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.894034 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:50Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.908734 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:50Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.936095 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:50Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.952336 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:50Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.963917 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.975339 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:50Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:50 crc kubenswrapper[5048]: I1213 06:29:50.992819 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:50Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.011413 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.027439 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.048663 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.073099 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.160528 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-9qddb" event={"ID":"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa","Type":"ContainerStarted","Data":"cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235"} Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.161901 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-multus/multus-r42c6" event={"ID":"627477f3-8fca-4b40-ace9-68d22f6b8576","Type":"ContainerStarted","Data":"c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a"} Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.161931 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-r42c6" event={"ID":"627477f3-8fca-4b40-ace9-68d22f6b8576","Type":"ContainerStarted","Data":"2e46e99fdc1781f0dc896bb1f7867addc4eba1787d3ba8e0f0f36e30a39ef13c"} Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.163486 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" event={"ID":"caf986e7-b521-40fd-ae26-18716730d57d","Type":"ContainerStarted","Data":"1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df"} Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.163513 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" event={"ID":"caf986e7-b521-40fd-ae26-18716730d57d","Type":"ContainerStarted","Data":"6c40d792533fafdf709bb32a6297988b37deba8d255553e27dd630c6b82eeec7"} Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.165635 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" event={"ID":"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b","Type":"ContainerStarted","Data":"a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e"} Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.165701 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" event={"ID":"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b","Type":"ContainerStarted","Data":"a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95"} Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.165714 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" event={"ID":"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b","Type":"ContainerStarted","Data":"511b46c13f3db83734b1f4404eef4a7cc9bf73908aec1aee5598b24754f15efb"} Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.167901 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-kkfct" event={"ID":"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7","Type":"ContainerStarted","Data":"b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1"} Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.177585 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: E1213 06:29:51.198716 5048 configmap.go:193] Couldn't get configMap openshift-multus/default-cni-sysctl-allowlist: failed to sync configmap cache: timed out waiting for the condition Dec 13 06:29:51 crc kubenswrapper[5048]: E1213 06:29:51.198832 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/8e1f99bb-aa5e-40eb-9b21-ff04d41acf50-cni-sysctl-allowlist podName:8e1f99bb-aa5e-40eb-9b21-ff04d41acf50 nodeName:}" failed. No retries permitted until 2025-12-13 06:29:51.698806789 +0000 UTC m=+25.565401370 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cni-sysctl-allowlist" (UniqueName: "kubernetes.io/configmap/8e1f99bb-aa5e-40eb-9b21-ff04d41acf50-cni-sysctl-allowlist") pod "multus-additional-cni-plugins-bdd78" (UID: "8e1f99bb-aa5e-40eb-9b21-ff04d41acf50") : failed to sync configmap cache: timed out waiting for the condition Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.200625 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.216403 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.234828 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.254427 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.277268 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.295756 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.311525 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.314051 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.351065 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.392198 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.417776 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.432949 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.447610 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.459509 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.479841 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.485034 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.493669 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.505059 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.507226 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.511694 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.531562 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod 
\"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.546346 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.559051 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.573620 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.586255 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.605272 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.623597 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.633348 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.633484 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:29:51 crc kubenswrapper[5048]: E1213 06:29:51.633576 5048 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 13 06:29:51 crc kubenswrapper[5048]: E1213 06:29:51.633689 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:29:55.633639038 +0000 UTC m=+29.500233619 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:29:51 crc kubenswrapper[5048]: E1213 06:29:51.633747 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-13 06:29:55.63373622 +0000 UTC m=+29.500330801 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.647136 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.669049 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.685093 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.707878 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.733525 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.734270 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/8e1f99bb-aa5e-40eb-9b21-ff04d41acf50-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-bdd78\" (UID: \"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\") " pod="openshift-multus/multus-additional-cni-plugins-bdd78" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.734386 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.734419 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.734484 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:29:51 crc kubenswrapper[5048]: E1213 06:29:51.734625 5048 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 13 06:29:51 crc kubenswrapper[5048]: E1213 06:29:51.734708 5048 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 13 06:29:51 crc kubenswrapper[5048]: E1213 06:29:51.734785 5048 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 13 06:29:51 crc kubenswrapper[5048]: E1213 06:29:51.734807 5048 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 13 06:29:51 crc kubenswrapper[5048]: E1213 06:29:51.734729 5048 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 13 06:29:51 crc kubenswrapper[5048]: E1213 06:29:51.734905 5048 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object 
"openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 13 06:29:51 crc kubenswrapper[5048]: E1213 06:29:51.734926 5048 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 13 06:29:51 crc kubenswrapper[5048]: E1213 06:29:51.734736 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-13 06:29:55.734716359 +0000 UTC m=+29.601310940 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 13 06:29:51 crc kubenswrapper[5048]: E1213 06:29:51.735007 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-13 06:29:55.734976016 +0000 UTC m=+29.601570737 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 13 06:29:51 crc kubenswrapper[5048]: E1213 06:29:51.735040 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-13 06:29:55.735030437 +0000 UTC m=+29.601625228 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.735199 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/8e1f99bb-aa5e-40eb-9b21-ff04d41acf50-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-bdd78\" (UID: \"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\") " pod="openshift-multus/multus-additional-cni-plugins-bdd78" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.763681 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mou
ntPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\
\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.792866 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.806418 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.837944 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.854740 5048 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.870859 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.889091 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.906128 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.922068 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.944568 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.965314 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.980345 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:51 crc kubenswrapper[5048]: I1213 06:29:51.993299 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:51Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:52 crc kubenswrapper[5048]: I1213 06:29:52.006131 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-bdd78" Dec 13 06:29:52 crc kubenswrapper[5048]: I1213 06:29:52.034339 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d77
3257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev
/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\
\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:52Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:52 crc kubenswrapper[5048]: I1213 06:29:52.173523 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e"} Dec 13 06:29:52 crc kubenswrapper[5048]: I1213 06:29:52.195170 5048 generic.go:334] "Generic (PLEG): container finished" podID="caf986e7-b521-40fd-ae26-18716730d57d" containerID="1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df" exitCode=0 Dec 13 06:29:52 crc kubenswrapper[5048]: I1213 06:29:52.195361 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" event={"ID":"caf986e7-b521-40fd-ae26-18716730d57d","Type":"ContainerDied","Data":"1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df"} Dec 13 06:29:52 crc kubenswrapper[5048]: I1213 06:29:52.197419 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" event={"ID":"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50","Type":"ContainerStarted","Data":"89884325f1a0f7dab38f2c94c2b71762e159c157edfd12514779adc3094f2cd5"} Dec 13 06:29:52 crc kubenswrapper[5048]: I1213 06:29:52.203661 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:52Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:52 crc kubenswrapper[5048]: I1213 06:29:52.227965 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:52Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:52 crc kubenswrapper[5048]: I1213 06:29:52.268014 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:52Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:52 crc kubenswrapper[5048]: I1213 06:29:52.291893 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-12-13T06:29:52Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:52 crc kubenswrapper[5048]: I1213 06:29:52.317959 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:52Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:52 crc kubenswrapper[5048]: I1213 06:29:52.361523 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:52Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:52 crc kubenswrapper[5048]: I1213 06:29:52.403412 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:52Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:52 crc kubenswrapper[5048]: I1213 06:29:52.432909 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:52Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:52 crc kubenswrapper[5048]: I1213 06:29:52.461079 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:52Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:52 crc kubenswrapper[5048]: I1213 06:29:52.475982 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:52Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:52 crc kubenswrapper[5048]: I1213 06:29:52.492872 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:52Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:52 crc kubenswrapper[5048]: I1213 06:29:52.551264 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:52Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:52 crc kubenswrapper[5048]: I1213 06:29:52.565848 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:29:52 crc kubenswrapper[5048]: I1213 06:29:52.565909 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:29:52 crc kubenswrapper[5048]: E1213 06:29:52.565995 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:29:52 crc kubenswrapper[5048]: E1213 06:29:52.566090 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:29:52 crc kubenswrapper[5048]: I1213 06:29:52.566208 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:29:52 crc kubenswrapper[5048]: E1213 06:29:52.566266 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:29:52 crc kubenswrapper[5048]: I1213 06:29:52.571689 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:52Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:52 crc kubenswrapper[5048]: I1213 06:29:52.590443 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:52Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:52 crc kubenswrapper[5048]: I1213 06:29:52.611888 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod 
\"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:52Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:52 crc kubenswrapper[5048]: I1213 06:29:52.632921 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-12-13T06:29:52Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:52 crc kubenswrapper[5048]: I1213 06:29:52.646072 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:52Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:52 crc kubenswrapper[5048]: I1213 06:29:52.662966 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:52Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:52 crc kubenswrapper[5048]: I1213 06:29:52.686036 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging 
kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-li
b\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\
\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":
\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:52Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:52 crc kubenswrapper[5048]: I1213 06:29:52.703021 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:52Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:52 crc kubenswrapper[5048]: I1213 06:29:52.723570 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:52Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:52 crc kubenswrapper[5048]: I1213 06:29:52.761731 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:52Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:52 crc kubenswrapper[5048]: I1213 06:29:52.800977 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:52Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:52 crc kubenswrapper[5048]: I1213 06:29:52.838514 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:52Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:52 crc kubenswrapper[5048]: I1213 06:29:52.887224 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e4911
7b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:52Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:52 crc kubenswrapper[5048]: I1213 06:29:52.921994 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:52Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:52 crc kubenswrapper[5048]: I1213 06:29:52.962590 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:52Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:52 crc kubenswrapper[5048]: I1213 06:29:52.999540 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:52Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:53 crc kubenswrapper[5048]: I1213 06:29:53.040464 5048 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:53Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:53 crc kubenswrapper[5048]: I1213 06:29:53.081538 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:53Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:53 crc kubenswrapper[5048]: I1213 06:29:53.203630 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" event={"ID":"caf986e7-b521-40fd-ae26-18716730d57d","Type":"ContainerStarted","Data":"61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87"} Dec 13 06:29:53 crc kubenswrapper[5048]: I1213 06:29:53.205575 5048 generic.go:334] "Generic (PLEG): container finished" podID="8e1f99bb-aa5e-40eb-9b21-ff04d41acf50" containerID="fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2" exitCode=0 Dec 13 06:29:53 crc kubenswrapper[5048]: I1213 06:29:53.205618 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" event={"ID":"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50","Type":"ContainerDied","Data":"fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2"} Dec 13 06:29:53 crc kubenswrapper[5048]: I1213 06:29:53.223096 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:53Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:53 crc kubenswrapper[5048]: I1213 06:29:53.241179 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:53Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:53 crc kubenswrapper[5048]: I1213 06:29:53.256982 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:53Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:53 crc kubenswrapper[5048]: I1213 06:29:53.282202 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging 
kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-li
b\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\
\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":
\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:53Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:53 crc kubenswrapper[5048]: I1213 06:29:53.300401 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},
{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:53Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:53 crc kubenswrapper[5048]: I1213 06:29:53.336215 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubern
etes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\
"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:53Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:53 crc kubenswrapper[5048]: I1213 06:29:53.359964 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:53Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:53 crc kubenswrapper[5048]: I1213 06:29:53.403558 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-12-13T06:29:53Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:53 crc kubenswrapper[5048]: I1213 06:29:53.439624 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:53Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:53 crc kubenswrapper[5048]: I1213 06:29:53.478964 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:53Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:53 crc kubenswrapper[5048]: I1213 06:29:53.521387 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:53Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:53 crc kubenswrapper[5048]: I1213 06:29:53.560238 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:53Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:53 crc 
kubenswrapper[5048]: I1213 06:29:53.601888 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"runnin
g\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:53Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:53 crc kubenswrapper[5048]: I1213 06:29:53.638368 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:53Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:53 crc kubenswrapper[5048]: I1213 06:29:53.677903 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:53Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.211727 5048 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" event={"ID":"caf986e7-b521-40fd-ae26-18716730d57d","Type":"ContainerStarted","Data":"82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f"} Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.288848 5048 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.291534 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.291608 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.291626 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.291840 5048 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.299267 5048 kubelet_node_status.go:115] "Node was previously registered" node="crc" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.299788 5048 kubelet_node_status.go:79] "Successfully registered node" node="crc" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.301113 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.301138 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.301149 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.301164 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.301180 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:54Z","lastTransitionTime":"2025-12-13T06:29:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:29:54 crc kubenswrapper[5048]: E1213 06:29:54.329383 5048 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6aaa1fdd-aec3-41cf-bed2-ae7f1a625255\\\",\\\"systemUUID\\\":\\\"c40dcd55-9053-46a7-9f70-890f2d5d7520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:54Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.333277 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.333317 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.333334 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.333354 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.333366 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:54Z","lastTransitionTime":"2025-12-13T06:29:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:54 crc kubenswrapper[5048]: E1213 06:29:54.349013 5048 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6aaa1fdd-aec3-41cf-bed2-ae7f1a625255\\\",\\\"systemUUID\\\":\\\"c40dcd55-9053-46a7-9f70-890f2d5d7520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:54Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.353844 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.353903 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.353923 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.353947 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.353963 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:54Z","lastTransitionTime":"2025-12-13T06:29:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:54 crc kubenswrapper[5048]: E1213 06:29:54.369146 5048 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6aaa1fdd-aec3-41cf-bed2-ae7f1a625255\\\",\\\"systemUUID\\\":\\\"c40dcd55-9053-46a7-9f70-890f2d5d7520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:54Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.374362 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.374399 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.374409 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.374434 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.374447 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:54Z","lastTransitionTime":"2025-12-13T06:29:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:54 crc kubenswrapper[5048]: E1213 06:29:54.394572 5048 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6aaa1fdd-aec3-41cf-bed2-ae7f1a625255\\\",\\\"systemUUID\\\":\\\"c40dcd55-9053-46a7-9f70-890f2d5d7520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:54Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.399050 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.399118 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.399138 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.399164 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.399183 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:54Z","lastTransitionTime":"2025-12-13T06:29:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:54 crc kubenswrapper[5048]: E1213 06:29:54.417964 5048 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6aaa1fdd-aec3-41cf-bed2-ae7f1a625255\\\",\\\"systemUUID\\\":\\\"c40dcd55-9053-46a7-9f70-890f2d5d7520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:54Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:54 crc kubenswrapper[5048]: E1213 06:29:54.418088 5048 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.420235 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.420281 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.420295 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.420316 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.420365 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:54Z","lastTransitionTime":"2025-12-13T06:29:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.523559 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.523612 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.523623 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.523642 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.523656 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:54Z","lastTransitionTime":"2025-12-13T06:29:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.566295 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.566406 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:29:54 crc kubenswrapper[5048]: E1213 06:29:54.566468 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.566511 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:29:54 crc kubenswrapper[5048]: E1213 06:29:54.566609 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:29:54 crc kubenswrapper[5048]: E1213 06:29:54.566929 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.626711 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.626806 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.626826 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.626860 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.626879 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:54Z","lastTransitionTime":"2025-12-13T06:29:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.730559 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.730651 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.730673 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.730700 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.730728 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:54Z","lastTransitionTime":"2025-12-13T06:29:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.834361 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.834438 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.834475 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.834503 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.834518 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:54Z","lastTransitionTime":"2025-12-13T06:29:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.937013 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.937065 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.937075 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.937094 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:54 crc kubenswrapper[5048]: I1213 06:29:54.937109 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:54Z","lastTransitionTime":"2025-12-13T06:29:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.039723 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.039776 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.039788 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.039809 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.039823 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:55Z","lastTransitionTime":"2025-12-13T06:29:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.142662 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.142711 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.142725 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.142746 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.142759 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:55Z","lastTransitionTime":"2025-12-13T06:29:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.245947 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.246010 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.246022 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.246048 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.246063 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:55Z","lastTransitionTime":"2025-12-13T06:29:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.348091 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.348145 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.348158 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.348174 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.348184 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:55Z","lastTransitionTime":"2025-12-13T06:29:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.451618 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.451674 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.451685 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.451708 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.451724 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:55Z","lastTransitionTime":"2025-12-13T06:29:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.559189 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.559254 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.559264 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.559284 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.559297 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:55Z","lastTransitionTime":"2025-12-13T06:29:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.663027 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.663074 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.663084 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.663104 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.663116 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:55Z","lastTransitionTime":"2025-12-13T06:29:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.698725 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:29:55 crc kubenswrapper[5048]: E1213 06:29:55.698889 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:30:03.698852022 +0000 UTC m=+37.565446613 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.698956 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:29:55 crc kubenswrapper[5048]: E1213 06:29:55.699084 5048 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 13 06:29:55 crc kubenswrapper[5048]: E1213 06:29:55.699141 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-13 06:30:03.69913094 +0000 UTC m=+37.565725521 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.766123 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.766183 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.766197 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.766218 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.766232 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:55Z","lastTransitionTime":"2025-12-13T06:29:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.799677 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.799740 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.799779 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:29:55 crc kubenswrapper[5048]: E1213 06:29:55.799878 5048 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 13 06:29:55 crc kubenswrapper[5048]: E1213 06:29:55.799947 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-13 06:30:03.799929243 +0000 UTC m=+37.666523824 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 13 06:29:55 crc kubenswrapper[5048]: E1213 06:29:55.799879 5048 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 13 06:29:55 crc kubenswrapper[5048]: E1213 06:29:55.800018 5048 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 13 06:29:55 crc kubenswrapper[5048]: E1213 06:29:55.800033 5048 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 13 06:29:55 crc kubenswrapper[5048]: E1213 06:29:55.800038 5048 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 13 06:29:55 crc kubenswrapper[5048]: E1213 06:29:55.800101 5048 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 13 06:29:55 crc kubenswrapper[5048]: E1213 06:29:55.800124 5048 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 13 06:29:55 crc kubenswrapper[5048]: E1213 06:29:55.800067 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-13 06:30:03.800056017 +0000 UTC m=+37.666650598 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 13 06:29:55 crc kubenswrapper[5048]: E1213 06:29:55.800240 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-13 06:30:03.800212141 +0000 UTC m=+37.666806932 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.869793 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.869851 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.869861 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.869882 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.869895 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:55Z","lastTransitionTime":"2025-12-13T06:29:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.973301 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.973371 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.973389 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.973409 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:55 crc kubenswrapper[5048]: I1213 06:29:55.973422 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:55Z","lastTransitionTime":"2025-12-13T06:29:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.076728 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.076784 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.076797 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.076818 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.076831 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:56Z","lastTransitionTime":"2025-12-13T06:29:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.180167 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.180759 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.180775 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.180796 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.180810 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:56Z","lastTransitionTime":"2025-12-13T06:29:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.221297 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" event={"ID":"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50","Type":"ContainerStarted","Data":"61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec"} Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.224567 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" event={"ID":"caf986e7-b521-40fd-ae26-18716730d57d","Type":"ContainerStarted","Data":"ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541"} Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.224629 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" event={"ID":"caf986e7-b521-40fd-ae26-18716730d57d","Type":"ContainerStarted","Data":"c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8"} Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.244224 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mou
ntPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.258896 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.271880 5048 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.284040 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.284105 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.284119 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.284142 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.284156 5048 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:56Z","lastTransitionTime":"2025-12-13T06:29:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.297494 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:56Z 
is after 2025-08-24T17:21:41Z" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.315729 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.331108 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.345900 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.362853 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.375763 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.387643 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.387701 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.387711 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.387753 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.387767 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:56Z","lastTransitionTime":"2025-12-13T06:29:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.398795 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.418246 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.446946 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64
b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.464287 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.480975 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.491484 5048 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.491554 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.491569 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.491595 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.491608 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:56Z","lastTransitionTime":"2025-12-13T06:29:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.501633 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.566588 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.566665 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.566779 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:29:56 crc kubenswrapper[5048]: E1213 06:29:56.566818 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:29:56 crc kubenswrapper[5048]: E1213 06:29:56.567006 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:29:56 crc kubenswrapper[5048]: E1213 06:29:56.567103 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.596580 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.596633 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.596649 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.596673 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.596690 5048 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:56Z","lastTransitionTime":"2025-12-13T06:29:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.606356 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.624837 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.647323 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{
\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.666595 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:56Z is after 
2025-08-24T17:21:41Z" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.682472 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.698554 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.698600 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.698615 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.698636 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.698652 5048 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:56Z","lastTransitionTime":"2025-12-13T06:29:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.706397 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:56Z 
is after 2025-08-24T17:21:41Z" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.735803 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"
/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\"
:\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.754129 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:56Z is after 2025-08-24T17:21:41Z"
Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.769549 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:56Z is after 2025-08-24T17:21:41Z"
Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.789771 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:56Z is after 2025-08-24T17:21:41Z"
Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.801920 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.802306 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.802396 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.802507 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.802588 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:56Z","lastTransitionTime":"2025-12-13T06:29:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.806440 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:56Z is after 2025-08-24T17:21:41Z"
Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.821414 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:56Z is after 2025-08-24T17:21:41Z"
Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.836904 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:56Z is after 2025-08-24T17:21:41Z"
Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.855747 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status:
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64
b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.906090 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.906225 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.906272 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.906351 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:56 crc kubenswrapper[5048]: I1213 06:29:56.906368 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:56Z","lastTransitionTime":"2025-12-13T06:29:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.010520 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.010588 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.010604 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.010666 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.010687 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:57Z","lastTransitionTime":"2025-12-13T06:29:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.113766 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.113855 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.113869 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.113893 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.113904 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:57Z","lastTransitionTime":"2025-12-13T06:29:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.216600 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.216669 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.216683 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.216704 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.216715 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:57Z","lastTransitionTime":"2025-12-13T06:29:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.243232 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" event={"ID":"caf986e7-b521-40fd-ae26-18716730d57d","Type":"ContainerStarted","Data":"4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f"} Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.243296 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" event={"ID":"caf986e7-b521-40fd-ae26-18716730d57d","Type":"ContainerStarted","Data":"28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767"} Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.245400 5048 generic.go:334] "Generic (PLEG): container finished" podID="8e1f99bb-aa5e-40eb-9b21-ff04d41acf50" containerID="61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec" exitCode=0 Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.245464 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" event={"ID":"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50","Type":"ContainerDied","Data":"61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec"} Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.272904 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:57Z 
is after 2025-08-24T17:21:41Z" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.288111 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:57Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.304624 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:57Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.318190 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-12-13T06:29:57Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.319260 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.319293 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.319306 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.319330 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.319344 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:57Z","lastTransitionTime":"2025-12-13T06:29:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.333021 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: 
current time 2025-12-13T06:29:57Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.348227 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:57Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.364859 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:57Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.379647 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cn
i/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:57Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.402679 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a
85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:57Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.420747 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-
13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:57Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.422776 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.422806 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.423393 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.423416 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.423426 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:57Z","lastTransitionTime":"2025-12-13T06:29:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.439741 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:57Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.453173 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:57Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.471408 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:57Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.490758 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:57Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.509425 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:57Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.527079 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.527142 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.527157 5048 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.527178 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.527193 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:57Z","lastTransitionTime":"2025-12-13T06:29:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.630046 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.630131 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.630148 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.630175 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.630190 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:57Z","lastTransitionTime":"2025-12-13T06:29:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.733647 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.733710 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.733725 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.733785 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.733802 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:57Z","lastTransitionTime":"2025-12-13T06:29:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.837610 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.837674 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.837690 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.837715 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.837734 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:57Z","lastTransitionTime":"2025-12-13T06:29:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.940113 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.940175 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.940189 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.940211 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:57 crc kubenswrapper[5048]: I1213 06:29:57.940226 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:57Z","lastTransitionTime":"2025-12-13T06:29:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.043816 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.043863 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.043873 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.043893 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.043906 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:58Z","lastTransitionTime":"2025-12-13T06:29:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.147409 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.147482 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.147498 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.147521 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.147536 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:58Z","lastTransitionTime":"2025-12-13T06:29:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.251078 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.251158 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.251174 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.251210 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.251230 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:58Z","lastTransitionTime":"2025-12-13T06:29:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.254280 5048 generic.go:334] "Generic (PLEG): container finished" podID="8e1f99bb-aa5e-40eb-9b21-ff04d41acf50" containerID="cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c" exitCode=0 Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.254353 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" event={"ID":"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50","Type":"ContainerDied","Data":"cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c"} Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.277734 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:58Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.294047 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:58Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.314899 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:58Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.344400 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e4911
7b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:58Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.354979 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.355024 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.355036 5048 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.355054 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.355068 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:58Z","lastTransitionTime":"2025-12-13T06:29:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.359013 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\
"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:58Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.373972 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:58Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.390252 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:58Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.410380 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:58Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.427219 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:58Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.442967 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:58Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.457695 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.457730 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.457741 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.457759 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.457771 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:58Z","lastTransitionTime":"2025-12-13T06:29:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.458427 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:58Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.473289 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:58Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.487097 
5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:58Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.518722 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:58Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.551011 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging 
kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-li
b\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\
\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":
\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:58Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.560956 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.561000 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.561011 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.561030 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.561041 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:58Z","lastTransitionTime":"2025-12-13T06:29:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.568514 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:29:58 crc kubenswrapper[5048]: E1213 06:29:58.568671 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.568750 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:29:58 crc kubenswrapper[5048]: E1213 06:29:58.568800 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.568848 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:29:58 crc kubenswrapper[5048]: E1213 06:29:58.568897 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.664764 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.664821 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.664832 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.664854 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.664866 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:58Z","lastTransitionTime":"2025-12-13T06:29:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.768486 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.768603 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.768616 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.768637 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.768651 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:58Z","lastTransitionTime":"2025-12-13T06:29:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.872638 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.872739 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.872753 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.872775 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.872786 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:58Z","lastTransitionTime":"2025-12-13T06:29:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.980784 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.980835 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.980847 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.980869 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:58 crc kubenswrapper[5048]: I1213 06:29:58.980886 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:58Z","lastTransitionTime":"2025-12-13T06:29:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.083621 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.083699 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.083711 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.083734 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.083746 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:59Z","lastTransitionTime":"2025-12-13T06:29:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.187255 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.187333 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.187348 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.187371 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.187384 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:59Z","lastTransitionTime":"2025-12-13T06:29:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.264550 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" event={"ID":"caf986e7-b521-40fd-ae26-18716730d57d","Type":"ContainerStarted","Data":"584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b"} Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.268315 5048 generic.go:334] "Generic (PLEG): container finished" podID="8e1f99bb-aa5e-40eb-9b21-ff04d41acf50" containerID="e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c" exitCode=0 Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.268396 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" event={"ID":"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50","Type":"ContainerDied","Data":"e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c"} Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.284772 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:59Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.291247 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.291299 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.291317 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.291346 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.291362 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:59Z","lastTransitionTime":"2025-12-13T06:29:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.318164 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa
41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log
-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\
\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:59Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.335373 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:59Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.349558 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:59Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.365893 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:59Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.392684 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e4911
7b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:59Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.399568 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.399634 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.399649 5048 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.399674 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.399690 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:59Z","lastTransitionTime":"2025-12-13T06:29:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.414103 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\
"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:59Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.430710 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:59Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.449773 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:59Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.471542 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:59Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.492299 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:59Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.502692 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.502731 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.502740 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.502757 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.502767 5048 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:59Z","lastTransitionTime":"2025-12-13T06:29:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.512441 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:59Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.526657 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:59Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.545064 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{
\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:59Z is after 2025-08-24T17:21:41Z" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.559897 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:29:59Z is after 
2025-08-24T17:21:41Z" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.605654 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.605723 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.605739 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.605761 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.605778 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:59Z","lastTransitionTime":"2025-12-13T06:29:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.708721 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.708789 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.708803 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.708827 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.708842 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:59Z","lastTransitionTime":"2025-12-13T06:29:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.811476 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.812004 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.812020 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.812045 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.812060 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:59Z","lastTransitionTime":"2025-12-13T06:29:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.914464 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.914538 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.914582 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.914601 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:29:59 crc kubenswrapper[5048]: I1213 06:29:59.914612 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:29:59Z","lastTransitionTime":"2025-12-13T06:29:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.017770 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.017857 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.017879 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.017903 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.017919 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:00Z","lastTransitionTime":"2025-12-13T06:30:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.122319 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.122553 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.122574 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.122657 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.122676 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:00Z","lastTransitionTime":"2025-12-13T06:30:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.225861 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.225946 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.225960 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.225977 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.225988 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:00Z","lastTransitionTime":"2025-12-13T06:30:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.276460 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" event={"ID":"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50","Type":"ContainerStarted","Data":"ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045"} Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.291779 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"ku
be-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:00Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.317294 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:00Z 
is after 2025-08-24T17:21:41Z" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.330804 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.330875 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.330888 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.330915 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.330927 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:00Z","lastTransitionTime":"2025-12-13T06:30:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.343508 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731c
a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:00Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.358313 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:00Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.372467 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-12-13T06:30:00Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.388337 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:00Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.403530 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:00Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.417375 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:00Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.431767 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:00Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.434893 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.434967 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.434981 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.435004 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.435018 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:00Z","lastTransitionTime":"2025-12-13T06:30:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.449665 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:
29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:00Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.466782 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:00Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.482694 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:00Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.501404 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:00Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.517792 5048 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:00Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.534703 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:00Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.538146 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.538195 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.538208 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.538231 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.538248 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:00Z","lastTransitionTime":"2025-12-13T06:30:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.566927 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:30:00 crc kubenswrapper[5048]: E1213 06:30:00.567138 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.567587 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:30:00 crc kubenswrapper[5048]: E1213 06:30:00.567692 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.567859 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:30:00 crc kubenswrapper[5048]: E1213 06:30:00.568068 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.642390 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.642477 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.642492 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.642521 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.642538 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:00Z","lastTransitionTime":"2025-12-13T06:30:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.745133 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.745186 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.745196 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.745216 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.745229 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:00Z","lastTransitionTime":"2025-12-13T06:30:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.848278 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.848345 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.848357 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.848377 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.848390 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:00Z","lastTransitionTime":"2025-12-13T06:30:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.951247 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.951304 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.951316 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.951339 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:00 crc kubenswrapper[5048]: I1213 06:30:00.951353 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:00Z","lastTransitionTime":"2025-12-13T06:30:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.054533 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.054612 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.054626 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.054652 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.054671 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:01Z","lastTransitionTime":"2025-12-13T06:30:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.157518 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.157563 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.157575 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.157593 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.157603 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:01Z","lastTransitionTime":"2025-12-13T06:30:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.261261 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.261316 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.261326 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.261346 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.261362 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:01Z","lastTransitionTime":"2025-12-13T06:30:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.285629 5048 generic.go:334] "Generic (PLEG): container finished" podID="8e1f99bb-aa5e-40eb-9b21-ff04d41acf50" containerID="ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045" exitCode=0 Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.285709 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" event={"ID":"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50","Type":"ContainerDied","Data":"ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045"} Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.303821 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:01Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.321507 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:01Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.340865 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:01Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.358873 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cn
i/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:01Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.364070 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.364119 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.364133 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.364152 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.364167 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:01Z","lastTransitionTime":"2025-12-13T06:30:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.383201 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:01Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.398129 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:01Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.410894 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-12-13T06:30:01Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.428683 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernet
es.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"exitCode\\\":0,
\\\"finishedAt\\\":\\\"2025-12-13T06:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:01Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.445056 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:01Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.461874 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:01Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.467665 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.468167 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.468257 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.468344 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.468421 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:01Z","lastTransitionTime":"2025-12-13T06:30:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.482773 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:01Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.498582 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:01Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.510961 
5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:01Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.530038 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:01Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.560774 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging 
kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-li
b\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\
\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":
\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:01Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.577024 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.577083 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.577103 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.577124 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.577134 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:01Z","lastTransitionTime":"2025-12-13T06:30:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.680483 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.681016 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.681030 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.681054 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.681067 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:01Z","lastTransitionTime":"2025-12-13T06:30:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.787271 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.787368 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.787803 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.787835 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.787853 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:01Z","lastTransitionTime":"2025-12-13T06:30:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.891397 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.891562 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.891576 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.891598 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.891610 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:01Z","lastTransitionTime":"2025-12-13T06:30:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.995865 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.995929 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.995942 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.995979 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:01 crc kubenswrapper[5048]: I1213 06:30:01.995993 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:01Z","lastTransitionTime":"2025-12-13T06:30:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.100523 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.100603 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.100617 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.100663 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.100684 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:02Z","lastTransitionTime":"2025-12-13T06:30:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.206242 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.206305 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.206319 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.206347 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.206363 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:02Z","lastTransitionTime":"2025-12-13T06:30:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.295528 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" event={"ID":"caf986e7-b521-40fd-ae26-18716730d57d","Type":"ContainerStarted","Data":"c15a65d5a7a28bcc0a5066c5dd5a81ab6f4055f55e87e4a0c2d5e0571e09eebe"} Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.309999 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.310063 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.310075 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.310099 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.310114 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:02Z","lastTransitionTime":"2025-12-13T06:30:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.413992 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.414045 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.414061 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.414085 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.414104 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:02Z","lastTransitionTime":"2025-12-13T06:30:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.517668 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.517726 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.517737 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.517757 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.517772 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:02Z","lastTransitionTime":"2025-12-13T06:30:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.566618 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.566722 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:30:02 crc kubenswrapper[5048]: E1213 06:30:02.566808 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.566802 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:30:02 crc kubenswrapper[5048]: E1213 06:30:02.566914 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:30:02 crc kubenswrapper[5048]: E1213 06:30:02.566900 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.620398 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.620501 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.620514 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.620538 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.620551 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:02Z","lastTransitionTime":"2025-12-13T06:30:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.723359 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.723398 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.723409 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.723428 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.723455 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:02Z","lastTransitionTime":"2025-12-13T06:30:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.826645 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.826721 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.826735 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.826794 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.826813 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:02Z","lastTransitionTime":"2025-12-13T06:30:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.929649 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.929719 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.929736 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.929807 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:02 crc kubenswrapper[5048]: I1213 06:30:02.929825 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:02Z","lastTransitionTime":"2025-12-13T06:30:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.032651 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.033101 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.033218 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.033327 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.033427 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:03Z","lastTransitionTime":"2025-12-13T06:30:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.138173 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.138229 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.138245 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.138267 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.138283 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:03Z","lastTransitionTime":"2025-12-13T06:30:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.241852 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.241910 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.241925 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.241945 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.241957 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:03Z","lastTransitionTime":"2025-12-13T06:30:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.303582 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" event={"ID":"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50","Type":"ContainerStarted","Data":"c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9"} Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.304176 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.320114 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:03Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.332069 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:03Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.332755 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w"] Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.333215 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.334528 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.337391 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.344218 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.344293 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.344308 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.344326 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.344428 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:03Z","lastTransitionTime":"2025-12-13T06:30:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.348191 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:03Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.366585 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:03Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.381105 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:03Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.384881 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-gdm7w\" (UID: \"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.385006 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9-env-overrides\") pod \"ovnkube-control-plane-749d76644c-gdm7w\" (UID: \"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.385044 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-gdm7w\" (UID: \"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.385086 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cnrrp\" (UniqueName: \"kubernetes.io/projected/3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9-kube-api-access-cnrrp\") pod \"ovnkube-control-plane-749d76644c-gdm7w\" (UID: \"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.402354 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a
85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:03Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.421268 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:03Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.443501 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mo
untPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:03Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.448108 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.448155 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.448166 5048 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.448184 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.448196 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:03Z","lastTransitionTime":"2025-12-13T06:30:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.458152 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:03Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.472321 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:03Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.485776 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9-env-overrides\") pod \"ovnkube-control-plane-749d76644c-gdm7w\" (UID: \"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.485841 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-gdm7w\" (UID: \"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.485874 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cnrrp\" (UniqueName: \"kubernetes.io/projected/3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9-kube-api-access-cnrrp\") pod \"ovnkube-control-plane-749d76644c-gdm7w\" (UID: \"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.485926 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-gdm7w\" (UID: \"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.486636 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9-env-overrides\") pod \"ovnkube-control-plane-749d76644c-gdm7w\" (UID: \"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.487093 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: 
\"kubernetes.io/configmap/3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-gdm7w\" (UID: \"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.491288 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-c
rc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:03Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.493331 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-gdm7w\" (UID: \"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.503640 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cnrrp\" (UniqueName: \"kubernetes.io/projected/3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9-kube-api-access-cnrrp\") pod \"ovnkube-control-plane-749d76644c-gdm7w\" (UID: \"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\") " 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.509408 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:03Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.525018 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:03Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.539393 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:03Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.551410 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.551460 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.551474 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.551495 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.551506 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:03Z","lastTransitionTime":"2025-12-13T06:30:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.560186 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa
41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log
-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\
\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:03Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.580970 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c15a65d5a7a28bcc0a5066c5dd5a81ab6f4055f5
5e87e4a0c2d5e0571e09eebe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccou
nt\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:03Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.595800 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gdm7w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:03Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.609210 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:03Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.624726 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:03Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.640361 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-12-13T06:30:03Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.652552 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.654449 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.654481 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.654491 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.654510 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.654523 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:03Z","lastTransitionTime":"2025-12-13T06:30:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.657777 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:03Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:03 crc kubenswrapper[5048]: W1213 06:30:03.677355 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3fa9d7e0_dc6b_4884_beba_d2f5d3d6f3d9.slice/crio-fbabbf86c712c4b13d21dfa75b4e5c5865ec2bd21cc3499958381e0e7568b8df WatchSource:0}: Error finding container fbabbf86c712c4b13d21dfa75b4e5c5865ec2bd21cc3499958381e0e7568b8df: Status 404 returned error can't find the container with id fbabbf86c712c4b13d21dfa75b4e5c5865ec2bd21cc3499958381e0e7568b8df Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.677392 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:03Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.696284 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:03Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.712144 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:03Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.736192 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e4911
7b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:03Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.754127 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mo
untPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:03Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.757767 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.757809 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.757844 5048 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.757869 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.757882 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:03Z","lastTransitionTime":"2025-12-13T06:30:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.771686 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:03Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.786921 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:03Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.788350 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:30:03 crc kubenswrapper[5048]: E1213 06:30:03.788627 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:30:19.78860082 +0000 UTC m=+53.655195401 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.788923 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:30:03 crc kubenswrapper[5048]: E1213 06:30:03.789800 5048 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 13 06:30:03 crc kubenswrapper[5048]: E1213 06:30:03.789904 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-13 06:30:19.789876725 +0000 UTC m=+53.656471306 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.803410 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117
ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:03Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.819192 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:03Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.836981 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:03Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.860216 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.860265 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.860277 5048 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.860297 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.860309 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:03Z","lastTransitionTime":"2025-12-13T06:30:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.891423 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.891526 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.891560 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:30:03 crc kubenswrapper[5048]: E1213 06:30:03.891713 5048 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 13 06:30:03 crc kubenswrapper[5048]: E1213 06:30:03.891796 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-13 06:30:19.891773089 +0000 UTC m=+53.758367670 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 13 06:30:03 crc kubenswrapper[5048]: E1213 06:30:03.892097 5048 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 13 06:30:03 crc kubenswrapper[5048]: E1213 06:30:03.892122 5048 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 13 06:30:03 crc kubenswrapper[5048]: E1213 06:30:03.892300 5048 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 13 06:30:03 crc kubenswrapper[5048]: E1213 06:30:03.892318 5048 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 13 06:30:03 crc kubenswrapper[5048]: E1213 06:30:03.892357 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-13 06:30:19.892347104 +0000 UTC m=+53.758941685 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 13 06:30:03 crc kubenswrapper[5048]: E1213 06:30:03.893511 5048 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 13 06:30:03 crc kubenswrapper[5048]: E1213 06:30:03.893565 5048 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 13 06:30:03 crc kubenswrapper[5048]: E1213 06:30:03.893610 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-13 06:30:19.893597699 +0000 UTC m=+53.760192280 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.963122 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.963180 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.963195 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.963218 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:03 crc kubenswrapper[5048]: I1213 06:30:03.963233 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:03Z","lastTransitionTime":"2025-12-13T06:30:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.067302 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.067816 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.067905 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.068019 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.068122 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:04Z","lastTransitionTime":"2025-12-13T06:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.171419 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.172700 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.172763 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.172795 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.172817 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:04Z","lastTransitionTime":"2025-12-13T06:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.201139 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.216109 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:04Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.228563 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:04Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.244674 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gdm7w\": Internal error occurred: 
failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:04Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.258295 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:04Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.276067 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.276143 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.276186 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 
06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.276209 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.276223 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:04Z","lastTransitionTime":"2025-12-13T06:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.282088 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c15a65d5a7a28bcc0a5066c5dd5a81ab6f4055f55
e87e4a0c2d5e0571e09eebe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:04Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.295894 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:04Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.308097 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" event={"ID":"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9","Type":"ContainerStarted","Data":"fbabbf86c712c4b13d21dfa75b4e5c5865ec2bd21cc3499958381e0e7568b8df"} Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.308211 5048 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.309054 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.316254 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:04Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.332506 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:04Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.342272 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.347515 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:04Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.363904 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:04Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.378999 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.379048 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.379059 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.379079 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.379094 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:04Z","lastTransitionTime":"2025-12-13T06:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.390490 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:04Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.406994 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:04Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.425306 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:04Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.433949 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.433995 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.434006 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.434027 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.434042 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:04Z","lastTransitionTime":"2025-12-13T06:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.445349 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:04Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:04 crc kubenswrapper[5048]: E1213 06:30:04.447703 5048 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6aaa1fdd-aec3-41cf-bed2-ae7f1a625255\\\",\\\"systemUUID\\\":\\\"c40dcd55-9053-46a7-9f70-890f2d5d7520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:04Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.454397 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.454427 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.454455 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.454475 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.454487 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:04Z","lastTransitionTime":"2025-12-13T06:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.463181 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:04Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:04 crc kubenswrapper[5048]: E1213 06:30:04.469566 5048 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6aaa1fdd-aec3-41cf-bed2-ae7f1a625255\\\",\\\"systemUUID\\\":\\\"c40dcd55-9053-46a7-9f70-890f2d5d7520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:04Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.475823 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.475887 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.475899 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.475922 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.475937 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:04Z","lastTransitionTime":"2025-12-13T06:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.485975 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:04Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:04 crc kubenswrapper[5048]: E1213 06:30:04.494140 5048 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6aaa1fdd-aec3-41cf-bed2-ae7f1a625255\\\",\\\"systemUUID\\\":\\\"c40dcd55-9053-46a7-9f70-890f2d5d7520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:04Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.498233 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.498275 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.498284 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.498302 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.498315 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:04Z","lastTransitionTime":"2025-12-13T06:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.504486 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:04Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:04 crc kubenswrapper[5048]: E1213 06:30:04.514365 5048 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6aaa1fdd-aec3-41cf-bed2-ae7f1a625255\\\",\\\"systemUUID\\\":\\\"c40dcd55-9053-46a7-9f70-890f2d5d7520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:04Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.518160 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:04Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.519911 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.519956 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.519971 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.519995 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.520009 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:04Z","lastTransitionTime":"2025-12-13T06:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.532397 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:04Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:04 crc kubenswrapper[5048]: E1213 06:30:04.532872 5048 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6aaa1fdd-aec3-41cf-bed2-ae7f1a625255\\\",\\\"systemUUID\\\":\\\"c40dcd55-9053-46a7-9f70-890f2d5d7520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:04Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:04 crc kubenswrapper[5048]: E1213 06:30:04.533208 5048 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.535411 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.535491 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.535509 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.535531 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.535546 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:04Z","lastTransitionTime":"2025-12-13T06:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.547853 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"nam
e\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:04Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.559710 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:04Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.566759 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.566786 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.566880 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:30:04 crc kubenswrapper[5048]: E1213 06:30:04.567025 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:30:04 crc kubenswrapper[5048]: E1213 06:30:04.567244 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:30:04 crc kubenswrapper[5048]: E1213 06:30:04.567173 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.572762 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:04Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.625787 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c15a65d5a7a28bcc0a5066c5dd5a81ab6f4055f55e87e4a0c2d5e0571e09eebe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:04Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.638739 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.639137 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.639237 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.639314 5048 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeNotReady" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.639378 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:04Z","lastTransitionTime":"2025-12-13T06:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.651018 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gdm7w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:04Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.685304 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:04Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.707286 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:04Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.724103 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:04Z is after 2025-08-24T17:21:41Z"
Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.742685 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.742746 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.742760 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.742777 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.742790 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:04Z","lastTransitionTime":"2025-12-13T06:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.742786 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:04Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.767605 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269
019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"
,\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:04Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.782316 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:04Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.796899 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-12-13T06:30:04Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.812031 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernet
es.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"exitCode\\\":0,
\\\"finishedAt\\\":\\\"2025-12-13T06:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:04Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.845528 5048 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.845918 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.846008 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.846167 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.846253 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:04Z","lastTransitionTime":"2025-12-13T06:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.949749 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.949792 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.949806 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.949829 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:04 crc kubenswrapper[5048]: I1213 06:30:04.949841 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:04Z","lastTransitionTime":"2025-12-13T06:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.052359 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.052471 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.052488 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.052514 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.052530 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:05Z","lastTransitionTime":"2025-12-13T06:30:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.155666 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.155716 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.155727 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.155749 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.155762 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:05Z","lastTransitionTime":"2025-12-13T06:30:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.220109 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-tm62z"]
Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.221260 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z"
Dec 13 06:30:05 crc kubenswrapper[5048]: E1213 06:30:05.221359 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.238806 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:05Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.255024 5048 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-dns/node-resolver-kkfct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:05Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.258063 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.258145 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.258157 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.258175 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.258188 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:05Z","lastTransitionTime":"2025-12-13T06:30:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.267943 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:05Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.289390 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c15a65d5a7a28bcc0a5066c5dd5a81ab6f4055f55e87e4a0c2d5e0571e09eebe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:05Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.306876 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/226b24e2-92c6-43d1-a621-09702ffa8fd4-metrics-certs\") pod \"network-metrics-daemon-tm62z\" (UID: \"226b24e2-92c6-43d1-a621-09702ffa8fd4\") " pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.306948 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mhpwj\" (UniqueName: 
\"kubernetes.io/projected/226b24e2-92c6-43d1-a621-09702ffa8fd4-kube-api-access-mhpwj\") pod \"network-metrics-daemon-tm62z\" (UID: \"226b24e2-92c6-43d1-a621-09702ffa8fd4\") " pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.307934 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gdm7w\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:05Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.315831 5048 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.327559 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:05Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.343804 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:05Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.360533 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.360582 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.360591 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.360606 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.360617 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:05Z","lastTransitionTime":"2025-12-13T06:30:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.362922 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:05Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.378671 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:05Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.402842 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:05Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.407922 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/226b24e2-92c6-43d1-a621-09702ffa8fd4-metrics-certs\") pod \"network-metrics-daemon-tm62z\" (UID: \"226b24e2-92c6-43d1-a621-09702ffa8fd4\") " pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.407989 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mhpwj\" (UniqueName: \"kubernetes.io/projected/226b24e2-92c6-43d1-a621-09702ffa8fd4-kube-api-access-mhpwj\") pod \"network-metrics-daemon-tm62z\" (UID: \"226b24e2-92c6-43d1-a621-09702ffa8fd4\") " pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:30:05 crc kubenswrapper[5048]: E1213 06:30:05.408119 5048 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 13 06:30:05 crc kubenswrapper[5048]: E1213 06:30:05.408229 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/226b24e2-92c6-43d1-a621-09702ffa8fd4-metrics-certs podName:226b24e2-92c6-43d1-a621-09702ffa8fd4 nodeName:}" failed. No retries permitted until 2025-12-13 06:30:05.908206055 +0000 UTC m=+39.774800696 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/226b24e2-92c6-43d1-a621-09702ffa8fd4-metrics-certs") pod "network-metrics-daemon-tm62z" (UID: "226b24e2-92c6-43d1-a621-09702ffa8fd4") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.418538 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:05Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.430683 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mhpwj\" (UniqueName: \"kubernetes.io/projected/226b24e2-92c6-43d1-a621-09702ffa8fd4-kube-api-access-mhpwj\") pod \"network-metrics-daemon-tm62z\" (UID: \"226b24e2-92c6-43d1-a621-09702ffa8fd4\") " pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.435666 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:05Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.454690 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:05Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.464309 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.464355 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.464364 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.464382 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.464393 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:05Z","lastTransitionTime":"2025-12-13T06:30:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.470984 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tm62z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"226b24e2-92c6-43d1-a621-09702ffa8fd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tm62z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:05Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.486710 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:05Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.504097 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:05Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.522869 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:05Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.567234 5048 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.567290 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.567301 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.567325 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.567336 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:05Z","lastTransitionTime":"2025-12-13T06:30:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.670620 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.670668 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.670679 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.670700 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.670713 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:05Z","lastTransitionTime":"2025-12-13T06:30:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.772863 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.772907 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.772940 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.772957 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.772967 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:05Z","lastTransitionTime":"2025-12-13T06:30:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.875884 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.875934 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.875947 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.875965 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.875977 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:05Z","lastTransitionTime":"2025-12-13T06:30:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.913792 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/226b24e2-92c6-43d1-a621-09702ffa8fd4-metrics-certs\") pod \"network-metrics-daemon-tm62z\" (UID: \"226b24e2-92c6-43d1-a621-09702ffa8fd4\") " pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:30:05 crc kubenswrapper[5048]: E1213 06:30:05.914018 5048 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 13 06:30:05 crc kubenswrapper[5048]: E1213 06:30:05.914133 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/226b24e2-92c6-43d1-a621-09702ffa8fd4-metrics-certs podName:226b24e2-92c6-43d1-a621-09702ffa8fd4 nodeName:}" failed. No retries permitted until 2025-12-13 06:30:06.914107975 +0000 UTC m=+40.780702556 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/226b24e2-92c6-43d1-a621-09702ffa8fd4-metrics-certs") pod "network-metrics-daemon-tm62z" (UID: "226b24e2-92c6-43d1-a621-09702ffa8fd4") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.978361 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.978419 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.978459 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.978478 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:05 crc kubenswrapper[5048]: I1213 06:30:05.978492 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:05Z","lastTransitionTime":"2025-12-13T06:30:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.081399 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.081483 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.081494 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.081515 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.081530 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:06Z","lastTransitionTime":"2025-12-13T06:30:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.184838 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.184894 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.185012 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.185093 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.185111 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:06Z","lastTransitionTime":"2025-12-13T06:30:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.291222 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.291321 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.291334 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.291356 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.291369 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:06Z","lastTransitionTime":"2025-12-13T06:30:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.319932 5048 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.395223 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.395273 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.395289 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.395312 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.395326 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:06Z","lastTransitionTime":"2025-12-13T06:30:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.498210 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.498262 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.498272 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.498291 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.498302 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:06Z","lastTransitionTime":"2025-12-13T06:30:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.566184 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.566289 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.566351 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:30:06 crc kubenswrapper[5048]: E1213 06:30:06.566426 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:30:06 crc kubenswrapper[5048]: E1213 06:30:06.566574 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:30:06 crc kubenswrapper[5048]: E1213 06:30:06.566693 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.581893 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.596547 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.602879 
5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.602918 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.602929 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.602984 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.602998 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:06Z","lastTransitionTime":"2025-12-13T06:30:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.619536 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c15a65d5a7a28bcc0a5066c5dd5a81ab6f4055f5
5e87e4a0c2d5e0571e09eebe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.635117 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gdm7w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.649920 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.666617 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.685921 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-12-13T06:30:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.705841 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.705948 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.706019 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.706031 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.706052 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.706071 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:06Z","lastTransitionTime":"2025-12-13T06:30:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.723175 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.740599 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.760880 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cn
i/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.787665 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a
85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.806554 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tm62z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"226b24e2-92c6-43d1-a621-09702ffa8fd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tm62z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.809281 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.809452 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.809462 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.809479 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.809492 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:06Z","lastTransitionTime":"2025-12-13T06:30:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.824878 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522
qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:58Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.844730 5048 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.862349 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.886156 5048 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.917296 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.917349 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.917364 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.917390 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.917406 5048 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:06Z","lastTransitionTime":"2025-12-13T06:30:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:06 crc kubenswrapper[5048]: I1213 06:30:06.925221 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/226b24e2-92c6-43d1-a621-09702ffa8fd4-metrics-certs\") pod \"network-metrics-daemon-tm62z\" (UID: \"226b24e2-92c6-43d1-a621-09702ffa8fd4\") " pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:30:06 crc kubenswrapper[5048]: E1213 06:30:06.925597 5048 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 13 06:30:06 crc kubenswrapper[5048]: E1213 06:30:06.925727 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/226b24e2-92c6-43d1-a621-09702ffa8fd4-metrics-certs podName:226b24e2-92c6-43d1-a621-09702ffa8fd4 nodeName:}" failed. No retries permitted until 2025-12-13 06:30:08.92569528 +0000 UTC m=+42.792290031 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/226b24e2-92c6-43d1-a621-09702ffa8fd4-metrics-certs") pod "network-metrics-daemon-tm62z" (UID: "226b24e2-92c6-43d1-a621-09702ffa8fd4") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.025480 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.026045 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.026065 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.026095 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.026109 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:07Z","lastTransitionTime":"2025-12-13T06:30:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.049958 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.067589 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:07Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.093517 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c15a65d5a7a28bcc0a5066c5dd5a81ab6f4055f55e87e4a0c2d5e0571e09eebe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:07Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.113318 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gdm7w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:07Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.130292 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:07 crc 
kubenswrapper[5048]: I1213 06:30:07.130334 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.130348 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.130370 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.130383 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:07Z","lastTransitionTime":"2025-12-13T06:30:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.132949 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:07Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.154160 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:07Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.174191 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:07Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.201982 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e4911
7b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:07Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.221905 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:07Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.235318 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.235361 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.235370 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.235391 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.235402 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:07Z","lastTransitionTime":"2025-12-13T06:30:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.238261 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:07Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.254824 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:07Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.275496 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:07Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.293048 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tm62z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"226b24e2-92c6-43d1-a621-09702ffa8fd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tm62z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:07Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.311987 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:07Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.325011 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" event={"ID":"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9","Type":"ContainerStarted","Data":"63a4b5bf19a626d27b5ff5bbe0faac497702e18b0003790f0165d788dcc3326b"} Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.325083 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" event={"ID":"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9","Type":"ContainerStarted","Data":"c880cc39f16d08a56ae903058580b30b83005a94239bc86b63ce2827d3e3338e"} Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.328720 5048 generic.go:334] "Generic (PLEG): container finished" podID="8e1f99bb-aa5e-40eb-9b21-ff04d41acf50" 
containerID="c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9" exitCode=0 Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.328782 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" event={"ID":"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50","Type":"ContainerDied","Data":"c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9"} Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.338863 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:07Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.340667 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.340706 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.340718 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.340740 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.340753 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:07Z","lastTransitionTime":"2025-12-13T06:30:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.373474 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:07Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.400070 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:07Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.420584 
5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:07Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.442806 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:07Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.444429 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.444521 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.444535 5048 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.444561 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.444576 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:07Z","lastTransitionTime":"2025-12-13T06:30:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.455844 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:07Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.470461 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:07Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.497461 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c15a65d5a7a28bcc0a5066c5dd5a81ab6f4055f55e87e4a0c2d5e0571e09eebe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPat
h\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:07Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.512000 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c880cc39f16d08a56ae903058580b30b83005a94239bc86b63ce2827d3e3338e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://63a4b5bf19a626d27b5ff5bbe0faac497702e18b0003790f0165d788dcc3326b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gdm7w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:07Z is after 2025-08-24T17:21:41Z" Dec 13 
06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.527603 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:07Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.543363 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:07Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.548478 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.548546 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.548564 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.548593 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.548612 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:07Z","lastTransitionTime":"2025-12-13T06:30:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.562382 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:07Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.568668 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:30:07 crc kubenswrapper[5048]: E1213 06:30:07.568889 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.582465 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:07Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.597434 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:07Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.625896 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:07Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.646391 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:07Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.651626 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.651668 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.651680 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.651700 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.651713 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:07Z","lastTransitionTime":"2025-12-13T06:30:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.667260 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386
579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\
\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:07Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.684226 5048 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-multus/network-metrics-daemon-tm62z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"226b24e2-92c6-43d1-a621-09702ffa8fd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tm62z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:07Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.705016 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:07Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.724580 5048 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049
b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:07Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.743749 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:07Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.756278 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.756328 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.756339 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.756358 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.756369 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:07Z","lastTransitionTime":"2025-12-13T06:30:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.859822 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.859864 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.859875 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.859895 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.859906 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:07Z","lastTransitionTime":"2025-12-13T06:30:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.962775 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.962834 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.962848 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.962870 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:07 crc kubenswrapper[5048]: I1213 06:30:07.962883 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:07Z","lastTransitionTime":"2025-12-13T06:30:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.066141 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.066206 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.066222 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.066249 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.066269 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:08Z","lastTransitionTime":"2025-12-13T06:30:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.169784 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.169823 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.169832 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.169848 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.169859 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:08Z","lastTransitionTime":"2025-12-13T06:30:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.272522 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.272580 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.272591 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.272612 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.272625 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:08Z","lastTransitionTime":"2025-12-13T06:30:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.341394 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" event={"ID":"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50","Type":"ContainerStarted","Data":"db7dc4c24b2f287638a86fcc16432cadbd96b03b50a7cb88b8b59a178a89001a"} Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.360143 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:08Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.376201 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.376268 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.376279 5048 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.376300 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.376313 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:08Z","lastTransitionTime":"2025-12-13T06:30:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.385018 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c15a65d5a7a28bcc0a5066c5dd5a81ab6f4055f5
5e87e4a0c2d5e0571e09eebe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:08Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.399873 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c880cc39f16d08a56ae903058580b30b83005a94239bc86b63ce2827d3e3338e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://63a4b5bf19a626d27b5ff5bbe0faac497702e18b0003790f0165d788dcc3326b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gdm7w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:08Z is after 2025-08-24T17:21:41Z" Dec 13 
06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.414941 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:08Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.429298 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:08Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.449021 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:08Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.465781 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:08Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.480351 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.480406 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.480418 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.480460 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.480476 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:08Z","lastTransitionTime":"2025-12-13T06:30:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.483237 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:08Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.505004 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:08Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.524219 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:08Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.544334 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db7dc4c24b2f287638a86fcc16432cadbd96b03b50a7cb88b8b59a178a89001a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/c
ni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:08Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.561709 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tm62z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"226b24e2-92c6-43d1-a621-09702ffa8fd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tm62z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:08Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.567045 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.567200 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.567277 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:30:08 crc kubenswrapper[5048]: E1213 06:30:08.567398 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:30:08 crc kubenswrapper[5048]: E1213 06:30:08.568025 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:30:08 crc kubenswrapper[5048]: E1213 06:30:08.568116 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.583210 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f89
45c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:08Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.584558 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.584625 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.584639 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.584663 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.584678 5048 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:08Z","lastTransitionTime":"2025-12-13T06:30:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.600240 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:08Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.615121 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:08Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.636157 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{
\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:08Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.651295 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:08Z is after 
2025-08-24T17:21:41Z" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.687964 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.688007 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.688049 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.688073 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.688088 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:08Z","lastTransitionTime":"2025-12-13T06:30:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.791370 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.791415 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.791425 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.791460 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.791475 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:08Z","lastTransitionTime":"2025-12-13T06:30:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.895235 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.895305 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.895317 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.895339 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.895350 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:08Z","lastTransitionTime":"2025-12-13T06:30:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:08 crc kubenswrapper[5048]: I1213 06:30:08.955668 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/226b24e2-92c6-43d1-a621-09702ffa8fd4-metrics-certs\") pod \"network-metrics-daemon-tm62z\" (UID: \"226b24e2-92c6-43d1-a621-09702ffa8fd4\") " pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:30:08 crc kubenswrapper[5048]: E1213 06:30:08.955969 5048 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 13 06:30:08 crc kubenswrapper[5048]: E1213 06:30:08.956093 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/226b24e2-92c6-43d1-a621-09702ffa8fd4-metrics-certs podName:226b24e2-92c6-43d1-a621-09702ffa8fd4 nodeName:}" failed. No retries permitted until 2025-12-13 06:30:12.956066087 +0000 UTC m=+46.822660668 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/226b24e2-92c6-43d1-a621-09702ffa8fd4-metrics-certs") pod "network-metrics-daemon-tm62z" (UID: "226b24e2-92c6-43d1-a621-09702ffa8fd4") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:08.999954 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.000021 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.000034 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.000056 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.000073 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:09Z","lastTransitionTime":"2025-12-13T06:30:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.104140 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.104207 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.104220 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.104247 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.104262 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:09Z","lastTransitionTime":"2025-12-13T06:30:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.207404 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.207476 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.207487 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.207509 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.207523 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:09Z","lastTransitionTime":"2025-12-13T06:30:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.311112 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.311191 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.311206 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.311237 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.311249 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:09Z","lastTransitionTime":"2025-12-13T06:30:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.346349 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hfgcf_caf986e7-b521-40fd-ae26-18716730d57d/ovnkube-controller/0.log" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.349505 5048 generic.go:334] "Generic (PLEG): container finished" podID="caf986e7-b521-40fd-ae26-18716730d57d" containerID="c15a65d5a7a28bcc0a5066c5dd5a81ab6f4055f55e87e4a0c2d5e0571e09eebe" exitCode=1 Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.349556 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" event={"ID":"caf986e7-b521-40fd-ae26-18716730d57d","Type":"ContainerDied","Data":"c15a65d5a7a28bcc0a5066c5dd5a81ab6f4055f55e87e4a0c2d5e0571e09eebe"} Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.350268 5048 scope.go:117] "RemoveContainer" containerID="c15a65d5a7a28bcc0a5066c5dd5a81ab6f4055f55e87e4a0c2d5e0571e09eebe" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.366858 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:09Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.387513 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run
/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87
\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c15a65d5a7a28bcc0a5066c5dd5a81ab6f4055f55e87e4a0c2d5e0571e09eebe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c15a65d5a7a28bcc0a5066c5dd5a81ab6f4055f55e87e4a0c2d5e0571e09eebe\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"message\\\":\\\"s) from k8s.io/client-go/informers/factory.go:160\\\\nI1213 06:30:08.653725 6317 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1213 06:30:08.654059 6317 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1213 06:30:08.654605 6317 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1213 06:30:08.654674 6317 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1213 06:30:08.654692 6317 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1213 06:30:08.654727 6317 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1213 06:30:08.654740 6317 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1213 06:30:08.654772 6317 factory.go:656] Stopping watch factory\\\\nI1213 06:30:08.654784 6317 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1213 06:30:08.654804 6317 ovnkube.go:599] Stopped ovnkube\\\\nI1213 06:30:08.654806 6317 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1213 06:30:08.654807 6317 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1213 06:30:08.654846 6317 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1213 
06\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:09Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.402866 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c880cc39f16d08a56ae903058580b30b83005a94239bc86b63ce2827d3e3338e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnl
y\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://63a4b5bf19a626d27b5ff5bbe0faac497702e18b0003790f0165d788dcc3326b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gdm7w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:09Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.413853 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.413907 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.413918 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.413935 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.413946 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:09Z","lastTransitionTime":"2025-12-13T06:30:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.420231 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:09Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.443946 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269
019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"
,\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:09Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.463040 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:09Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.479187 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-12-13T06:30:09Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.497259 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:09Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.518379 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:09Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.525749 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.525807 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.525823 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.525843 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.525861 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:09Z","lastTransitionTime":"2025-12-13T06:30:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.556271 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:09Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.566513 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:30:09 crc kubenswrapper[5048]: E1213 06:30:09.566724 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.588127 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db7dc4c24b2f287638a86fcc16432cadbd96b03b50a7cb88b8b59a178a89001a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6125
9010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\
\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:09Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.602163 5048 
status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tm62z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"226b24e2-92c6-43d1-a621-09702ffa8fd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tm62z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:09Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.620145 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:09Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.630407 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.630484 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.630498 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.630522 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.630536 5048 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:09Z","lastTransitionTime":"2025-12-13T06:30:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.634814 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:09Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.649221 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:09Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.668652 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{
\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:09Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.681553 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:09Z is after 
2025-08-24T17:21:41Z" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.733000 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.733050 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.733063 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.733083 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.733094 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:09Z","lastTransitionTime":"2025-12-13T06:30:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.837046 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.837118 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.837135 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.837161 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.837178 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:09Z","lastTransitionTime":"2025-12-13T06:30:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.939782 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.939836 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.939846 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.939865 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:09 crc kubenswrapper[5048]: I1213 06:30:09.939876 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:09Z","lastTransitionTime":"2025-12-13T06:30:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.046165 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.046233 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.046255 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.046282 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.046313 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:10Z","lastTransitionTime":"2025-12-13T06:30:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.149274 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.149315 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.149326 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.149343 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.149352 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:10Z","lastTransitionTime":"2025-12-13T06:30:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.252920 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.252987 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.252996 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.253018 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.253040 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:10Z","lastTransitionTime":"2025-12-13T06:30:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.356701 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.357313 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.357332 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.357363 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.357385 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:10Z","lastTransitionTime":"2025-12-13T06:30:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.357447 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hfgcf_caf986e7-b521-40fd-ae26-18716730d57d/ovnkube-controller/0.log" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.361104 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" event={"ID":"caf986e7-b521-40fd-ae26-18716730d57d","Type":"ContainerStarted","Data":"f01e17a58a23bf59a1f30f8f3e0fa84f9a8e3557f77b4aa2509403086fcd597c"} Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.361280 5048 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.384265 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:10Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.400533 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:10Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.420487 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:10Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.443060 5048 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:10Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.459903 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:10Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.460278 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.460307 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.460318 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.460338 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.460351 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:10Z","lastTransitionTime":"2025-12-13T06:30:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.476715 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:10Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.504646 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f01e17a58a23bf59a1f30f8f3e0fa84f9a8e3557f77b4aa2509403086fcd597c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c15a65d5a7a28bcc0a5066c5dd5a81ab6f4055f55e87e4a0c2d5e0571e09eebe\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"message\\\":\\\"s) from k8s.io/client-go/informers/factory.go:160\\\\nI1213 06:30:08.653725 6317 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1213 06:30:08.654059 6317 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1213 06:30:08.654605 6317 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1213 06:30:08.654674 6317 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1213 06:30:08.654692 6317 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1213 06:30:08.654727 6317 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1213 06:30:08.654740 6317 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1213 06:30:08.654772 6317 factory.go:656] Stopping watch factory\\\\nI1213 06:30:08.654784 6317 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1213 06:30:08.654804 6317 ovnkube.go:599] Stopped ovnkube\\\\nI1213 06:30:08.654806 6317 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1213 06:30:08.654807 6317 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1213 06:30:08.654846 6317 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1213 
06\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:10Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.524579 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c880cc39f16d08a56ae903058580b30b83005a94239bc86b63ce2827d3e3338e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://63a4b5bf19a626d27b5ff5bbe0faac497702e18b0003790f0165d788dcc3326b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gdm7w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:10Z is after 2025-08-24T17:21:41Z" Dec 13 
06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.545472 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.
168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:10Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.564719 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.564774 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.564787 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.564809 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.564827 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:10Z","lastTransitionTime":"2025-12-13T06:30:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.568396 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.568478 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:30:10 crc kubenswrapper[5048]: E1213 06:30:10.568614 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.568677 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:30:10 crc kubenswrapper[5048]: E1213 06:30:10.568848 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:30:10 crc kubenswrapper[5048]: E1213 06:30:10.569023 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.585995 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\
":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"
cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:10Z is after 2025-08-24T17:21:41Z"
Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.613408 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:10Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.639063 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-12-13T06:30:10Z is after 2025-08-24T17:21:41Z"
Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.661050 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:10Z is after 2025-08-24T17:21:41Z"
Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.668028 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.668304 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.668325 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.668386 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.668404 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:10Z","lastTransitionTime":"2025-12-13T06:30:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.679325 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:10Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.700002 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:10Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.717644 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db7dc4c24b2f287638a86fcc16432cadbd96b03b50a7cb88b8b59a178a89001a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"container
ID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:57Z\\\"}},\\\"volumeMo
unts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true
,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:10Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.734389 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tm62z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"226b24e2-92c6-43d1-a621-09702ffa8fd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tm62z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:10Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.771039 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.771101 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.771112 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.771140 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.771153 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:10Z","lastTransitionTime":"2025-12-13T06:30:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.874783 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.874830 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.874843 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.874863 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.874874 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:10Z","lastTransitionTime":"2025-12-13T06:30:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.978208 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.978251 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.978262 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.978279 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:10 crc kubenswrapper[5048]: I1213 06:30:10.978290 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:10Z","lastTransitionTime":"2025-12-13T06:30:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.082025 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.082067 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.082079 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.082096 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.082110 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:11Z","lastTransitionTime":"2025-12-13T06:30:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.184991 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.185352 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.185471 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.185644 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.185736 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:11Z","lastTransitionTime":"2025-12-13T06:30:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.288758 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.288811 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.288824 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.288843 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.288858 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:11Z","lastTransitionTime":"2025-12-13T06:30:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.391746 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.391784 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.391796 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.391812 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.391823 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:11Z","lastTransitionTime":"2025-12-13T06:30:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.494219 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.494273 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.494285 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.494306 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.494322 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:11Z","lastTransitionTime":"2025-12-13T06:30:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.566538 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z"
Dec 13 06:30:11 crc kubenswrapper[5048]: E1213 06:30:11.566704 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4"
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.597793 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.597848 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.597859 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.597880 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.597891 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:11Z","lastTransitionTime":"2025-12-13T06:30:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.700505 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.700569 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.700583 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.700655 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.700672 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:11Z","lastTransitionTime":"2025-12-13T06:30:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.804484 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.804549 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.804561 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.804677 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.804688 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:11Z","lastTransitionTime":"2025-12-13T06:30:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.909216 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.909251 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.909262 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.909283 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:11 crc kubenswrapper[5048]: I1213 06:30:11.909295 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:11Z","lastTransitionTime":"2025-12-13T06:30:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.011916 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.011967 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.011978 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.011995 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.012014 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:12Z","lastTransitionTime":"2025-12-13T06:30:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.115631 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.115695 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.115711 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.115745 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.115765 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:12Z","lastTransitionTime":"2025-12-13T06:30:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.219681 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.219728 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.219737 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.219754 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.219764 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:12Z","lastTransitionTime":"2025-12-13T06:30:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.322832 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.322898 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.322919 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.322942 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.322956 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:12Z","lastTransitionTime":"2025-12-13T06:30:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.370007 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hfgcf_caf986e7-b521-40fd-ae26-18716730d57d/ovnkube-controller/1.log"
Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.371155 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hfgcf_caf986e7-b521-40fd-ae26-18716730d57d/ovnkube-controller/0.log"
Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.374789 5048 generic.go:334] "Generic (PLEG): container finished" podID="caf986e7-b521-40fd-ae26-18716730d57d" containerID="f01e17a58a23bf59a1f30f8f3e0fa84f9a8e3557f77b4aa2509403086fcd597c" exitCode=1
Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.374851 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" event={"ID":"caf986e7-b521-40fd-ae26-18716730d57d","Type":"ContainerDied","Data":"f01e17a58a23bf59a1f30f8f3e0fa84f9a8e3557f77b4aa2509403086fcd597c"}
Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.374937 5048 scope.go:117] "RemoveContainer" containerID="c15a65d5a7a28bcc0a5066c5dd5a81ab6f4055f55e87e4a0c2d5e0571e09eebe"
Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.376107 5048 scope.go:117] "RemoveContainer" containerID="f01e17a58a23bf59a1f30f8f3e0fa84f9a8e3557f77b4aa2509403086fcd597c"
Dec 13 06:30:12 crc kubenswrapper[5048]: E1213 06:30:12.376331 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-hfgcf_openshift-ovn-kubernetes(caf986e7-b521-40fd-ae26-18716730d57d)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" podUID="caf986e7-b521-40fd-ae26-18716730d57d"
Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.396398 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:12Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.413720 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:12Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.426713 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.426767 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.426780 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.426803 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.426817 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:12Z","lastTransitionTime":"2025-12-13T06:30:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.432187 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:12Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.451063 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:12Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.466370 
5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:12Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.480262 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:12Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.508031 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f01e17a58a23bf59a1f30f8f3e0fa84f9a8e3557f77b4aa2509403086fcd597c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c15a65d5a7a28bcc0a5066c5dd5a81ab6f4055f55e87e4a0c2d5e0571e09eebe\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"message\\\":\\\"s) from k8s.io/client-go/informers/factory.go:160\\\\nI1213 06:30:08.653725 6317 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1213 06:30:08.654059 6317 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1213 06:30:08.654605 6317 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1213 06:30:08.654674 6317 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1213 06:30:08.654692 6317 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1213 06:30:08.654727 6317 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1213 06:30:08.654740 6317 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1213 06:30:08.654772 6317 factory.go:656] Stopping watch factory\\\\nI1213 06:30:08.654784 6317 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1213 06:30:08.654804 6317 ovnkube.go:599] Stopped ovnkube\\\\nI1213 06:30:08.654806 6317 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1213 06:30:08.654807 6317 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1213 06:30:08.654846 6317 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1213 06\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f01e17a58a23bf59a1f30f8f3e0fa84f9a8e3557f77b4aa2509403086fcd597c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-13T06:30:11Z\\\",\\\"message\\\":\\\"e]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.213:80:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {2ead45b3-c313-4fbc-a7bc-2b3c4ffd610c}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1213 06:30:10.597757 6539 ovnkube.go:599] Stopped ovnkube\\\\nI1213 06:30:10.597818 6539 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1213 06:30:10.597880 6539 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator\\\\\\\"}\\\\nI1213 06:30:10.596663 6539 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", 
\\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/catalog-operator-metrics\\\\\\\"}\\\\nF1213 06:30:10.597931 6539 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for namespace Informer during admin network policy controller initialization, handler {0x1fcbf20 0x1fcbc00 0x1fcbba0} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotati\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-
ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:12Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.529766 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.529839 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.529852 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.529880 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.529894 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:12Z","lastTransitionTime":"2025-12-13T06:30:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.530540 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c880cc39f16d08a56ae903058580b30b83005a94239bc86b63ce2827d3e3338e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://63a4b5bf19a626d27b5ff5bbe0faac497702e18b0003790f0165d788dcc3326b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gdm7w\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:12Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.547493 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:12Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.566103 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.566138 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:30:12 crc kubenswrapper[5048]: E1213 06:30:12.566259 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.566346 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:30:12 crc kubenswrapper[5048]: E1213 06:30:12.566539 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:30:12 crc kubenswrapper[5048]: E1213 06:30:12.566711 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.566860 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"moun
tPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:12Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.589935 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a
85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:12Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.608679 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:12Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.625611 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:12Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.632832 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.632878 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.632891 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.632912 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.632926 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:12Z","lastTransitionTime":"2025-12-13T06:30:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.643980 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:12Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.662580 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:12Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.684067 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db7dc4c24b2f287638a86fcc16432cadbd96b03b50a7cb88b8b59a178a89001a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\
\\":[{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:57Z
\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\"
,\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:12Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.699508 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tm62z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"226b24e2-92c6-43d1-a621-09702ffa8fd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tm62z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:12Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.736203 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.736278 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.736289 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.736313 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.736327 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:12Z","lastTransitionTime":"2025-12-13T06:30:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.839835 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.839908 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.839921 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.839944 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.839958 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:12Z","lastTransitionTime":"2025-12-13T06:30:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.943080 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.943138 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.943148 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.943171 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:12 crc kubenswrapper[5048]: I1213 06:30:12.943182 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:12Z","lastTransitionTime":"2025-12-13T06:30:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.008473 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/226b24e2-92c6-43d1-a621-09702ffa8fd4-metrics-certs\") pod \"network-metrics-daemon-tm62z\" (UID: \"226b24e2-92c6-43d1-a621-09702ffa8fd4\") " pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:30:13 crc kubenswrapper[5048]: E1213 06:30:13.008734 5048 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 13 06:30:13 crc kubenswrapper[5048]: E1213 06:30:13.008873 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/226b24e2-92c6-43d1-a621-09702ffa8fd4-metrics-certs podName:226b24e2-92c6-43d1-a621-09702ffa8fd4 nodeName:}" failed. No retries permitted until 2025-12-13 06:30:21.008841613 +0000 UTC m=+54.875436194 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/226b24e2-92c6-43d1-a621-09702ffa8fd4-metrics-certs") pod "network-metrics-daemon-tm62z" (UID: "226b24e2-92c6-43d1-a621-09702ffa8fd4") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.046298 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.046349 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.046362 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.046384 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.046397 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:13Z","lastTransitionTime":"2025-12-13T06:30:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.149574 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.150251 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.150323 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.150428 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.150638 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:13Z","lastTransitionTime":"2025-12-13T06:30:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.254116 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.254197 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.254209 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.254230 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.254244 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:13Z","lastTransitionTime":"2025-12-13T06:30:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.357001 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.357073 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.357084 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.357104 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.357118 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:13Z","lastTransitionTime":"2025-12-13T06:30:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.385211 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hfgcf_caf986e7-b521-40fd-ae26-18716730d57d/ovnkube-controller/1.log" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.460786 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.460864 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.460877 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.460898 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.460912 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:13Z","lastTransitionTime":"2025-12-13T06:30:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.563672 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.563726 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.563737 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.563766 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.563783 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:13Z","lastTransitionTime":"2025-12-13T06:30:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.565961 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:30:13 crc kubenswrapper[5048]: E1213 06:30:13.566130 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.667164 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.667203 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.667211 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.667224 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.667232 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:13Z","lastTransitionTime":"2025-12-13T06:30:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.771702 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.771771 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.771784 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.771816 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.771832 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:13Z","lastTransitionTime":"2025-12-13T06:30:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.874932 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.874982 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.874993 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.875012 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.875025 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:13Z","lastTransitionTime":"2025-12-13T06:30:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.977387 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.977534 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.977554 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.977576 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:13 crc kubenswrapper[5048]: I1213 06:30:13.977590 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:13Z","lastTransitionTime":"2025-12-13T06:30:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.082155 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.082231 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.082250 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.082276 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.082321 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:14Z","lastTransitionTime":"2025-12-13T06:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.185860 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.185904 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.185919 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.185942 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.185956 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:14Z","lastTransitionTime":"2025-12-13T06:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.289414 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.289519 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.289530 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.289590 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.289606 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:14Z","lastTransitionTime":"2025-12-13T06:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.392617 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.392682 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.392692 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.392712 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.392728 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:14Z","lastTransitionTime":"2025-12-13T06:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.496129 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.496201 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.496218 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.496244 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.496260 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:14Z","lastTransitionTime":"2025-12-13T06:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.566375 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.566538 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:30:14 crc kubenswrapper[5048]: E1213 06:30:14.566656 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.566785 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:30:14 crc kubenswrapper[5048]: E1213 06:30:14.566923 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:30:14 crc kubenswrapper[5048]: E1213 06:30:14.567090 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.599221 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.599273 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.599282 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.599297 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.599309 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:14Z","lastTransitionTime":"2025-12-13T06:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.703145 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.703198 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.703212 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.703229 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.703243 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:14Z","lastTransitionTime":"2025-12-13T06:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.722396 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.722466 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.722481 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.722506 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.722521 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:14Z","lastTransitionTime":"2025-12-13T06:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:14 crc kubenswrapper[5048]: E1213 06:30:14.736614 5048 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6aaa1fdd-aec3-41cf-bed2-ae7f1a625255\\\",\\\"systemUUID\\\":\\\"c40dcd55-9053-46a7-9f70-890f2d5d7520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:14Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.741602 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.741655 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.741668 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.741689 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.741703 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:14Z","lastTransitionTime":"2025-12-13T06:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:14 crc kubenswrapper[5048]: E1213 06:30:14.757090 5048 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6aaa1fdd-aec3-41cf-bed2-ae7f1a625255\\\",\\\"systemUUID\\\":\\\"c40dcd55-9053-46a7-9f70-890f2d5d7520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:14Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.761653 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.761692 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.761702 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.761719 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.761731 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:14Z","lastTransitionTime":"2025-12-13T06:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:14 crc kubenswrapper[5048]: E1213 06:30:14.778470 5048 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6aaa1fdd-aec3-41cf-bed2-ae7f1a625255\\\",\\\"systemUUID\\\":\\\"c40dcd55-9053-46a7-9f70-890f2d5d7520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:14Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.785325 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.785376 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.785387 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.785408 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.785418 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:14Z","lastTransitionTime":"2025-12-13T06:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:14 crc kubenswrapper[5048]: E1213 06:30:14.802855 5048 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6aaa1fdd-aec3-41cf-bed2-ae7f1a625255\\\",\\\"systemUUID\\\":\\\"c40dcd55-9053-46a7-9f70-890f2d5d7520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:14Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.807737 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.807778 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.807794 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.807816 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.807828 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:14Z","lastTransitionTime":"2025-12-13T06:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:14 crc kubenswrapper[5048]: E1213 06:30:14.823778 5048 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6aaa1fdd-aec3-41cf-bed2-ae7f1a625255\\\",\\\"systemUUID\\\":\\\"c40dcd55-9053-46a7-9f70-890f2d5d7520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:14Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:14 crc kubenswrapper[5048]: E1213 06:30:14.823924 5048 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.826517 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.826590 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.826604 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.826630 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.826645 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:14Z","lastTransitionTime":"2025-12-13T06:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.929909 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.929974 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.929989 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.930014 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:14 crc kubenswrapper[5048]: I1213 06:30:14.930031 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:14Z","lastTransitionTime":"2025-12-13T06:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.038556 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.038646 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.038660 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.038694 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.038706 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:15Z","lastTransitionTime":"2025-12-13T06:30:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.141588 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.141663 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.141678 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.141702 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.141714 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:15Z","lastTransitionTime":"2025-12-13T06:30:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.244202 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.244531 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.244600 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.244674 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.244774 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:15Z","lastTransitionTime":"2025-12-13T06:30:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.347769 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.348205 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.348296 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.348409 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.348514 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:15Z","lastTransitionTime":"2025-12-13T06:30:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.451242 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.451296 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.451310 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.451334 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.451349 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:15Z","lastTransitionTime":"2025-12-13T06:30:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.554729 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.554783 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.554795 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.554816 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.554826 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:15Z","lastTransitionTime":"2025-12-13T06:30:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.566540 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:30:15 crc kubenswrapper[5048]: E1213 06:30:15.566719 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.657786 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.657841 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.657851 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.657870 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.657881 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:15Z","lastTransitionTime":"2025-12-13T06:30:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.761362 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.761406 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.761417 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.761451 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.761493 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:15Z","lastTransitionTime":"2025-12-13T06:30:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.865358 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.865425 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.865474 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.865499 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.865515 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:15Z","lastTransitionTime":"2025-12-13T06:30:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.968673 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.968746 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.968763 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.968788 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:15 crc kubenswrapper[5048]: I1213 06:30:15.968807 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:15Z","lastTransitionTime":"2025-12-13T06:30:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.072329 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.072387 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.072403 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.072424 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.072458 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:16Z","lastTransitionTime":"2025-12-13T06:30:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.175384 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.175501 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.175518 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.175539 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.175550 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:16Z","lastTransitionTime":"2025-12-13T06:30:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.279186 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.279249 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.279263 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.279287 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.279302 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:16Z","lastTransitionTime":"2025-12-13T06:30:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.382194 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.382277 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.382288 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.382329 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.382341 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:16Z","lastTransitionTime":"2025-12-13T06:30:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.485187 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.485235 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.485247 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.485269 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.485282 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:16Z","lastTransitionTime":"2025-12-13T06:30:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.566753 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.566814 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.566753 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:30:16 crc kubenswrapper[5048]: E1213 06:30:16.566913 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:30:16 crc kubenswrapper[5048]: E1213 06:30:16.567003 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:30:16 crc kubenswrapper[5048]: E1213 06:30:16.567207 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.586316 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:16Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.587455 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.587509 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.587522 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.587547 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.587567 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:16Z","lastTransitionTime":"2025-12-13T06:30:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.601392 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:16Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.614015 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:16Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.634327 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f01e17a58a23bf59a1f30f8f3e0fa84f9a8e3557f77b4aa2509403086fcd597c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c15a65d5a7a28bcc0a5066c5dd5a81ab6f4055f55e87e4a0c2d5e0571e09eebe\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"message\\\":\\\"s) from k8s.io/client-go/informers/factory.go:160\\\\nI1213 06:30:08.653725 6317 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1213 06:30:08.654059 6317 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1213 06:30:08.654605 6317 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1213 06:30:08.654674 6317 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1213 06:30:08.654692 6317 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1213 06:30:08.654727 6317 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1213 06:30:08.654740 6317 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1213 06:30:08.654772 6317 factory.go:656] Stopping watch factory\\\\nI1213 06:30:08.654784 6317 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1213 06:30:08.654804 6317 ovnkube.go:599] Stopped ovnkube\\\\nI1213 06:30:08.654806 6317 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1213 06:30:08.654807 6317 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1213 06:30:08.654846 6317 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1213 06\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f01e17a58a23bf59a1f30f8f3e0fa84f9a8e3557f77b4aa2509403086fcd597c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-13T06:30:11Z\\\",\\\"message\\\":\\\"e]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.213:80:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid 
== {2ead45b3-c313-4fbc-a7bc-2b3c4ffd610c}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1213 06:30:10.597757 6539 ovnkube.go:599] Stopped ovnkube\\\\nI1213 06:30:10.597818 6539 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1213 06:30:10.597880 6539 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator\\\\\\\"}\\\\nI1213 06:30:10.596663 6539 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/catalog-operator-metrics\\\\\\\"}\\\\nF1213 06:30:10.597931 6539 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for namespace Informer during admin network policy controller initialization, handler {0x1fcbf20 0x1fcbc00 0x1fcbba0} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotati\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f
2fc21140e7425b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:16Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.646345 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c880cc39f16d08a56ae903058580b30b83005a94239bc86b63ce2827d3e3338e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://63a4b5bf19a626d27b5ff5bbe0faac497702e18b0003790f0165d788dcc3326b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gdm7w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:16Z is after 2025-08-24T17:21:41Z" Dec 13 
06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.661566 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:16Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.677553 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:16Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.691078 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.691118 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.691127 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.691145 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.691155 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:16Z","lastTransitionTime":"2025-12-13T06:30:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.696127 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:16Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.726977 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269
019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"
,\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:16Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.742674 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:16Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.758972 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-12-13T06:30:16Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.778303 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:16Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.794458 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.794511 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.794523 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.794544 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.794558 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:16Z","lastTransitionTime":"2025-12-13T06:30:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.797552 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db7dc4c24b2f287638a86fcc16432cadbd96b03b50a7cb88b8b59a178a89001a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc86
49728da00edd753165fb0aaa5ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\
\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:16Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.809088 5048 status_manager.go:875] "Failed to update 
status for pod" pod="openshift-multus/network-metrics-daemon-tm62z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"226b24e2-92c6-43d1-a621-09702ffa8fd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tm62z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:16Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.823360 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:16Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.839862 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:16Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.855858 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:16Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.897793 5048 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.897835 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.897847 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.897864 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.897876 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:16Z","lastTransitionTime":"2025-12-13T06:30:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.949782 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.950843 5048 scope.go:117] "RemoveContainer" containerID="f01e17a58a23bf59a1f30f8f3e0fa84f9a8e3557f77b4aa2509403086fcd597c" Dec 13 06:30:16 crc kubenswrapper[5048]: E1213 06:30:16.951069 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-hfgcf_openshift-ovn-kubernetes(caf986e7-b521-40fd-ae26-18716730d57d)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" podUID="caf986e7-b521-40fd-ae26-18716730d57d" Dec 13 06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.969174 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c880cc39f16d08a56ae903058580b30b83005a94239bc86b63ce2827d3e3338e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://63a4b5bf19a626d27b5ff5bbe0faac497702e18b0003790f0165d788dcc3326b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gdm7w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:16Z is after 2025-08-24T17:21:41Z" Dec 13 
06:30:16 crc kubenswrapper[5048]: I1213 06:30:16.982882 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:16Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.001243 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.001287 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.001296 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.001313 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.001325 5048 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:17Z","lastTransitionTime":"2025-12-13T06:30:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.006252 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\
":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access
-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f01e17a58a23bf59a1f30f8f3e0fa84f9a8e3557f77b4aa2509403086fcd597c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f01e17a58a23bf59a1f30f8f3e0fa84f9a8e3557f77b4aa2509403086fcd597c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-13T06:30:11Z\\\",\\\"message\\\":\\\"e]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.213:80:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {2ead45b3-c313-4fbc-a7bc-2b3c4ffd610c}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1213 06:30:10.597757 6539 ovnkube.go:599] Stopped ovnkube\\\\nI1213 06:30:10.597818 6539 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1213 06:30:10.597880 6539 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator\\\\\\\"}\\\\nI1213 06:30:10.596663 6539 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/catalog-operator-metrics\\\\\\\"}\\\\nF1213 06:30:10.597931 6539 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for namespace Informer during admin network policy controller initialization, handler {0x1fcbf20 0x1fcbc00 0x1fcbba0} was not 
added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotati\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-hfgcf_openshift-ovn-kubernetes(caf986e7-b521-40fd-ae26-18716730d57d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\"
:\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:17Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.021583 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:17Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.036422 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:17Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.051216 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:17Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.066922 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:17Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.084588 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:17Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.104573 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.104642 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.104654 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.104676 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.104687 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:17Z","lastTransitionTime":"2025-12-13T06:30:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.109095 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:17Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.122521 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:17Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.139693 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db7dc4c24b2f287638a86fcc16432cadbd96b03b50a7cb88b8b59a178a89001a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091
a31115835ab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-b
inary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"termin
ated\\\":{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:17Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.152547 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tm62z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"226b24e2-92c6-43d1-a621-09702ffa8fd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tm62z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:17Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.163851 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:17Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.176352 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:17Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.191588 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6
355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 
06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:17Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.206207 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:17Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.207711 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.207758 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.207770 5048 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.207789 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.207802 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:17Z","lastTransitionTime":"2025-12-13T06:30:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.221569 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:17Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.310817 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:17 
crc kubenswrapper[5048]: I1213 06:30:17.310985 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.310999 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.311022 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.311036 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:17Z","lastTransitionTime":"2025-12-13T06:30:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.413748 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.413802 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.413816 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.413843 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.413859 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:17Z","lastTransitionTime":"2025-12-13T06:30:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.516607 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.516733 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.516747 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.516773 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.516786 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:17Z","lastTransitionTime":"2025-12-13T06:30:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.566558 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z"
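Every "Failed to update status for pod" entry in this window has the same root cause: the network-node-identity webhook at https://127.0.0.1:9743 serves a certificate whose notAfter (2025-08-24T17:21:41Z) is long past the node clock (2025-12-13T06:30:17Z). A minimal sketch of that arithmetic, using only the two timestamps printed in the x509 error above:

    # Both timestamps are taken verbatim from the x509 error in the log.
    from datetime import datetime, timezone

    not_after = datetime(2025, 8, 24, 17, 21, 41, tzinfo=timezone.utc)  # cert notAfter
    now = datetime(2025, 12, 13, 6, 30, 17, tzinfo=timezone.utc)        # kubelet clock
    print(now - not_after)  # 110 days, 13:08:36 past expiry

Until that certificate is rotated, every status patch routed through pod.network-node-identity.openshift.io is rejected with the same expiry message, which is why the kubelet keeps recording events locally but cannot persist pod status.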
Dec 13 06:30:17 crc kubenswrapper[5048]: E1213 06:30:17.566808 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.620550 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.620612 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.620628 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.620649 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.620662 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:17Z","lastTransitionTime":"2025-12-13T06:30:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.724345 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.724413 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.724426 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.724467 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.724485 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:17Z","lastTransitionTime":"2025-12-13T06:30:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
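The NodeNotReady condition repeating through this window always carries the same message: no CNI configuration file in /etc/kubernetes/cni/net.d/. A minimal sketch of the readiness check that message implies; the directory is quoted from the log, while the extension list mirrors common CNI conventions (the real lookup happens in the container runtime's Go libcni code, not in Python):

    # Is there any CNI network config yet? Until one appears, the runtime
    # keeps reporting NetworkReady=false and the node stays NotReady.
    import glob
    import os

    CNI_DIR = "/etc/kubernetes/cni/net.d"  # path quoted in the log message
    found = sorted(
        p
        for ext in ("*.conf", "*.conflist", "*.json")  # conventional CNI extensions
        for p in glob.glob(os.path.join(CNI_DIR, ext))
    )
    print(found or f"no CNI configuration file in {CNI_DIR}")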
Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.827807 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.827867 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.827881 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.827906 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.827921 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:17Z","lastTransitionTime":"2025-12-13T06:30:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.930422 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.930485 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.930496 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.930515 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:17 crc kubenswrapper[5048]: I1213 06:30:17.930554 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:17Z","lastTransitionTime":"2025-12-13T06:30:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.033473 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.033534 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.033546 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.033570 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.033583 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:18Z","lastTransitionTime":"2025-12-13T06:30:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.136637 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.136689 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.136704 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.136726 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.136740 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:18Z","lastTransitionTime":"2025-12-13T06:30:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.240977 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.241025 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.241053 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.241071 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.241083 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:18Z","lastTransitionTime":"2025-12-13T06:30:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.343797 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.343846 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.343855 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.343874 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.343889 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:18Z","lastTransitionTime":"2025-12-13T06:30:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.447112 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.447165 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.447174 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.447192 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.447205 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:18Z","lastTransitionTime":"2025-12-13T06:30:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.550638 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.550696 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.550707 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.550726 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.550742 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:18Z","lastTransitionTime":"2025-12-13T06:30:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.566809 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.566858 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.566846 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:30:18 crc kubenswrapper[5048]: E1213 06:30:18.567020 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:30:18 crc kubenswrapper[5048]: E1213 06:30:18.567140 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:30:18 crc kubenswrapper[5048]: E1213 06:30:18.567365 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.653588 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.653626 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.653635 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.653652 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.653663 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:18Z","lastTransitionTime":"2025-12-13T06:30:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.756967 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.757003 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.757012 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.757031 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.757045 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:18Z","lastTransitionTime":"2025-12-13T06:30:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.860390 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.860485 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.860499 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.860526 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.860550 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:18Z","lastTransitionTime":"2025-12-13T06:30:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.963791 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.963861 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.963881 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.963905 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:18 crc kubenswrapper[5048]: I1213 06:30:18.963920 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:18Z","lastTransitionTime":"2025-12-13T06:30:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.067384 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.067445 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.067458 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.067479 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.067493 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:19Z","lastTransitionTime":"2025-12-13T06:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.171431 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.171503 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.171516 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.171541 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.171554 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:19Z","lastTransitionTime":"2025-12-13T06:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.275108 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.275181 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.275195 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.275221 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.275239 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:19Z","lastTransitionTime":"2025-12-13T06:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.378584 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.378684 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.378698 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.378734 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.378755 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:19Z","lastTransitionTime":"2025-12-13T06:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.481298 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.481352 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.481362 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.481382 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.481398 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:19Z","lastTransitionTime":"2025-12-13T06:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.566400 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:30:19 crc kubenswrapper[5048]: E1213 06:30:19.566676 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.584564 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.584615 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.584628 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.584649 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.584664 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:19Z","lastTransitionTime":"2025-12-13T06:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.688901 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.688959 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.688978 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.689006 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.689023 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:19Z","lastTransitionTime":"2025-12-13T06:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.791361 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.791422 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.791465 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.791480 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.791491 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:19Z","lastTransitionTime":"2025-12-13T06:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.795918 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.796129 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:30:19 crc kubenswrapper[5048]: E1213 06:30:19.796307 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-12-13 06:30:51.796198976 +0000 UTC m=+85.662793557 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:30:19 crc kubenswrapper[5048]: E1213 06:30:19.796397 5048 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 13 06:30:19 crc kubenswrapper[5048]: E1213 06:30:19.796630 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-13 06:30:51.796544685 +0000 UTC m=+85.663139266 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.894719 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.894795 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.894808 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.894825 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.894838 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:19Z","lastTransitionTime":"2025-12-13T06:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
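The UnmountVolume failure above is a registration problem rather than a mount problem: the kubelet cannot find kubevirt.io.hostpath-provisioner among its registered CSI drivers, so the teardown is requeued (durationBeforeRetry 32s). CSI drivers register by placing a socket in the kubelet's plugin registry; a minimal sketch for checking whether the driver has re-registered, assuming the conventional /var/lib/kubelet/plugins_registry location (this log never names the directory):

    # List the CSI registration sockets the kubelet can currently see.
    # The directory is the conventional kubelet plugin-registration path,
    # assumed here for illustration; the log itself does not print it.
    import os

    REG_DIR = "/var/lib/kubelet/plugins_registry"
    try:
        socks = [e for e in os.listdir(REG_DIR) if e.endswith(".sock")]
        print(socks or "no CSI drivers registered yet")
    except FileNotFoundError:
        print(f"{REG_DIR} does not exist on this node")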
Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.897032 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.897103 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:30:19 crc kubenswrapper[5048]: E1213 06:30:19.897123 5048 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 13 06:30:19 crc kubenswrapper[5048]: E1213 06:30:19.897168 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-13 06:30:51.897155144 +0000 UTC m=+85.763749725 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 13 06:30:19 crc kubenswrapper[5048]: E1213 06:30:19.897185 5048 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 13 06:30:19 crc kubenswrapper[5048]: E1213 06:30:19.897201 5048 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 13 06:30:19 crc kubenswrapper[5048]: E1213 06:30:19.897212 5048 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.897125 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:30:19 crc kubenswrapper[5048]: E1213 06:30:19.897244 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. 
No retries permitted until 2025-12-13 06:30:51.897234576 +0000 UTC m=+85.763829157 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 13 06:30:19 crc kubenswrapper[5048]: E1213 06:30:19.897321 5048 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 13 06:30:19 crc kubenswrapper[5048]: E1213 06:30:19.897388 5048 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 13 06:30:19 crc kubenswrapper[5048]: E1213 06:30:19.897402 5048 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 13 06:30:19 crc kubenswrapper[5048]: E1213 06:30:19.897496 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-13 06:30:51.897478022 +0000 UTC m=+85.764072623 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.998170 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.998218 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.998228 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.998243 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:19 crc kubenswrapper[5048]: I1213 06:30:19.998253 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:19Z","lastTransitionTime":"2025-12-13T06:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.101969 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.102019 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.102032 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.102048 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.102061 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:20Z","lastTransitionTime":"2025-12-13T06:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.205273 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.205319 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.205332 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.205358 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.205372 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:20Z","lastTransitionTime":"2025-12-13T06:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.308560 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.308618 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.308628 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.308650 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.308666 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:20Z","lastTransitionTime":"2025-12-13T06:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.411653 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.411724 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.411740 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.411767 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.411785 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:20Z","lastTransitionTime":"2025-12-13T06:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.514782 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.514850 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.514862 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.514889 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.514903 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:20Z","lastTransitionTime":"2025-12-13T06:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.566453 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.566490 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:30:20 crc kubenswrapper[5048]: E1213 06:30:20.566672 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.566543 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:30:20 crc kubenswrapper[5048]: E1213 06:30:20.566772 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:30:20 crc kubenswrapper[5048]: E1213 06:30:20.566824 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.618755 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.619748 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.619833 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.619896 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.619915 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:20Z","lastTransitionTime":"2025-12-13T06:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.722748 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.722870 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.722880 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.722898 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.722908 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:20Z","lastTransitionTime":"2025-12-13T06:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.825336 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.825394 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.825406 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.825426 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.825466 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:20Z","lastTransitionTime":"2025-12-13T06:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.929443 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.929527 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.929543 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.929598 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:20 crc kubenswrapper[5048]: I1213 06:30:20.929617 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:20Z","lastTransitionTime":"2025-12-13T06:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.011648 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/226b24e2-92c6-43d1-a621-09702ffa8fd4-metrics-certs\") pod \"network-metrics-daemon-tm62z\" (UID: \"226b24e2-92c6-43d1-a621-09702ffa8fd4\") " pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:30:21 crc kubenswrapper[5048]: E1213 06:30:21.011911 5048 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 13 06:30:21 crc kubenswrapper[5048]: E1213 06:30:21.012011 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/226b24e2-92c6-43d1-a621-09702ffa8fd4-metrics-certs podName:226b24e2-92c6-43d1-a621-09702ffa8fd4 nodeName:}" failed. No retries permitted until 2025-12-13 06:30:37.011985009 +0000 UTC m=+70.878579590 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/226b24e2-92c6-43d1-a621-09702ffa8fd4-metrics-certs") pod "network-metrics-daemon-tm62z" (UID: "226b24e2-92c6-43d1-a621-09702ffa8fd4") : object "openshift-multus"/"metrics-daemon-secret" not registered
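Note the retry spacing in these volume errors: the metrics-certs mount is requeued with durationBeforeRetry 16s here, while the operations that had already failed at 06:30:19 (nginx-conf, networking-console-plugin-cert, the projected tokens) are at 32s. That is consistent with a per-operation delay that doubles on each repeated failure; a minimal sketch of such a schedule, where the base and cap are illustrative assumptions (only the 16s and 32s values appear in this log):

    # A doubling retry schedule consistent with the 16s and 32s delays above.
    # base and cap are assumptions for illustration, not values from the log.
    def delays(base=0.5, factor=2.0, cap=120.0, n=10):
        out, d = [], base
        for _ in range(n):
            out.append(min(d, cap))
            d *= factor
        return out

    print(delays())  # [0.5, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 120.0, 120.0]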
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/226b24e2-92c6-43d1-a621-09702ffa8fd4-metrics-certs") pod "network-metrics-daemon-tm62z" (UID: "226b24e2-92c6-43d1-a621-09702ffa8fd4") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.033027 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.033087 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.033104 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.033124 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.033137 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:21Z","lastTransitionTime":"2025-12-13T06:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.136577 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.136644 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.136658 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.136685 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.136698 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:21Z","lastTransitionTime":"2025-12-13T06:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.239596 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.239660 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.239676 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.239702 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.239718 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:21Z","lastTransitionTime":"2025-12-13T06:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.343748 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.343814 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.343833 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.343856 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.344059 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:21Z","lastTransitionTime":"2025-12-13T06:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.446544 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.446592 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.446601 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.446618 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.446630 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:21Z","lastTransitionTime":"2025-12-13T06:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.549938 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.550013 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.550029 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.550059 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.550077 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:21Z","lastTransitionTime":"2025-12-13T06:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.566560 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:30:21 crc kubenswrapper[5048]: E1213 06:30:21.566782 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.653199 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.653258 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.653267 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.653288 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.653299 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:21Z","lastTransitionTime":"2025-12-13T06:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.756082 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.756167 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.756179 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.756197 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.756208 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:21Z","lastTransitionTime":"2025-12-13T06:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.860735 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.860815 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.860826 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.860845 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.860855 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:21Z","lastTransitionTime":"2025-12-13T06:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.964030 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.964105 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.964117 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.964138 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:21 crc kubenswrapper[5048]: I1213 06:30:21.964152 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:21Z","lastTransitionTime":"2025-12-13T06:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.067969 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.068026 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.068038 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.068060 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.068076 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:22Z","lastTransitionTime":"2025-12-13T06:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.170680 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.170724 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.170752 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.170769 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.170781 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:22Z","lastTransitionTime":"2025-12-13T06:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.274423 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.274502 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.274514 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.274531 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.274548 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:22Z","lastTransitionTime":"2025-12-13T06:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.378280 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.378387 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.378400 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.378419 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.378481 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:22Z","lastTransitionTime":"2025-12-13T06:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.482374 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.482559 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.482581 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.482605 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.482643 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:22Z","lastTransitionTime":"2025-12-13T06:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.566622 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.566834 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 13 06:30:22 crc kubenswrapper[5048]: E1213 06:30:22.566892 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 13 06:30:22 crc kubenswrapper[5048]: E1213 06:30:22.567069 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.566648 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 13 06:30:22 crc kubenswrapper[5048]: E1213 06:30:22.567273 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.586174 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.586247 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.586257 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.586280 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.586293 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:22Z","lastTransitionTime":"2025-12-13T06:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.689920 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.690002 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.690015 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.690046 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.690065 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:22Z","lastTransitionTime":"2025-12-13T06:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.793587 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.793636 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.793647 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.793694 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.793705 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:22Z","lastTransitionTime":"2025-12-13T06:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.896328 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.896366 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.896374 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.896389 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.896398 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:22Z","lastTransitionTime":"2025-12-13T06:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.998389 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.998431 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.998441 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.998454 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:22 crc kubenswrapper[5048]: I1213 06:30:22.998494 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:22Z","lastTransitionTime":"2025-12-13T06:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.101376 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.101441 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.101489 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.101512 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.101532 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:23Z","lastTransitionTime":"2025-12-13T06:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.204650 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.204726 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.204744 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.204772 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.204790 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:23Z","lastTransitionTime":"2025-12-13T06:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.307251 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.307297 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.307308 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.307324 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.307334 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:23Z","lastTransitionTime":"2025-12-13T06:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.409864 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.409915 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.409926 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.409946 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.409961 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:23Z","lastTransitionTime":"2025-12-13T06:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.512872 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.512919 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.512931 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.512949 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.512962 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:23Z","lastTransitionTime":"2025-12-13T06:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.566766 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z"
Dec 13 06:30:23 crc kubenswrapper[5048]: E1213 06:30:23.566902 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4"
Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.615408 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.615468 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.615477 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.615493 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.615502 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:23Z","lastTransitionTime":"2025-12-13T06:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.717861 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.717949 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.717968 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.717999 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.718021 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:23Z","lastTransitionTime":"2025-12-13T06:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.786243 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.800558 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"]
Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.810230 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:23Z is after 2025-08-24T17:21:41Z"
Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.821389 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.821632 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.821696 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.821734 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.821754 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:23Z","lastTransitionTime":"2025-12-13T06:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.841324 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:23Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.867155 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:23Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.906305 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:23Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.923959 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:23Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.925630 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.925763 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.925833 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.925927 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.925994 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:23Z","lastTransitionTime":"2025-12-13T06:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.943491 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:23Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.960851 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:23Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.978520 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db7dc4c24b2f287638a86fcc16432cadbd96b03b50a7cb88b8b59a178a89001a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:23Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:23 crc kubenswrapper[5048]: I1213 06:30:23.993172 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tm62z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"226b24e2-92c6-43d1-a621-09702ffa8fd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tm62z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:23Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.020030 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:24Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.029522 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.029562 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.029605 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.029629 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.029641 5048 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:24Z","lastTransitionTime":"2025-12-13T06:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.035121 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:24Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.049519 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:24Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.065323 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{
\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:24Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.075765 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:24Z is after 
2025-08-24T17:21:41Z" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.087028 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:24Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.109079 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f01e17a58a23bf59a1f30f8f3e0fa84f9a8e3557f77b4aa2509403086fcd597c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f01e17a58a23bf59a1f30f8f3e0fa84f9a8e3557f77b4aa2509403086fcd597c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-13T06:30:11Z\\\",\\\"message\\\":\\\"e]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.213:80:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {2ead45b3-c313-4fbc-a7bc-2b3c4ffd610c}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1213 06:30:10.597757 6539 ovnkube.go:599] Stopped ovnkube\\\\nI1213 06:30:10.597818 6539 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1213 06:30:10.597880 6539 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator\\\\\\\"}\\\\nI1213 06:30:10.596663 6539 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/catalog-operator-metrics\\\\\\\"}\\\\nF1213 06:30:10.597931 6539 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for namespace Informer during admin network policy controller initialization, handler {0x1fcbf20 0x1fcbc00 0x1fcbba0} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotati\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hfgcf_openshift-ovn-kubernetes(caf986e7-b521-40fd-ae26-18716730d57d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:24Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.269263 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.269345 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.269364 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.269391 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.269408 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:24Z","lastTransitionTime":"2025-12-13T06:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.280129 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c880cc39f16d08a56ae903058580b30b83005a94239bc86b63ce2827d3e3338e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://63a4b5bf19a626d27b5ff5bbe0faac497702e18b0003790f0165d788dcc3326b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gdm7w\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:24Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.372423 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.373001 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.373065 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.373125 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.373181 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:24Z","lastTransitionTime":"2025-12-13T06:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.476123 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.476376 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.476397 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.476424 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.476495 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:24Z","lastTransitionTime":"2025-12-13T06:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.566612 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:30:24 crc kubenswrapper[5048]: E1213 06:30:24.566807 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.566612 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.566842 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:30:24 crc kubenswrapper[5048]: E1213 06:30:24.566928 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:30:24 crc kubenswrapper[5048]: E1213 06:30:24.567014 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.580387 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.580816 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.580894 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.581557 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.581642 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:24Z","lastTransitionTime":"2025-12-13T06:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.685154 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.685204 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.685212 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.685232 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.685241 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:24Z","lastTransitionTime":"2025-12-13T06:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.788792 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.788848 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.788860 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.788880 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.788893 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:24Z","lastTransitionTime":"2025-12-13T06:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.891899 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.891957 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.891970 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.891992 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.892004 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:24Z","lastTransitionTime":"2025-12-13T06:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.994278 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.994615 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.994695 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.994808 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:24 crc kubenswrapper[5048]: I1213 06:30:24.994898 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:24Z","lastTransitionTime":"2025-12-13T06:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.097897 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.097960 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.097974 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.097999 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.098016 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:25Z","lastTransitionTime":"2025-12-13T06:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.189815 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.189880 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.189892 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.189913 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.189926 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:25Z","lastTransitionTime":"2025-12-13T06:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:25 crc kubenswrapper[5048]: E1213 06:30:25.207018 5048 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6aaa1fdd-aec3-41cf-bed2-ae7f1a625255\\\",\\\"systemUUID\\\":\\\"c40dcd55-9053-46a7-9f70-890f2d5d7520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:25Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.212967 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.213008 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.213017 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.213033 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.213045 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:25Z","lastTransitionTime":"2025-12-13T06:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:25 crc kubenswrapper[5048]: E1213 06:30:25.227353 5048 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6aaa1fdd-aec3-41cf-bed2-ae7f1a625255\\\",\\\"systemUUID\\\":\\\"c40dcd55-9053-46a7-9f70-890f2d5d7520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:25Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.233498 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.233548 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.233561 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.233596 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.233612 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:25Z","lastTransitionTime":"2025-12-13T06:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:25 crc kubenswrapper[5048]: E1213 06:30:25.250160 5048 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6aaa1fdd-aec3-41cf-bed2-ae7f1a625255\\\",\\\"systemUUID\\\":\\\"c40dcd55-9053-46a7-9f70-890f2d5d7520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:25Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.254418 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.254532 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.254546 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.254566 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.254579 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:25Z","lastTransitionTime":"2025-12-13T06:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:25 crc kubenswrapper[5048]: E1213 06:30:25.268617 5048 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6aaa1fdd-aec3-41cf-bed2-ae7f1a625255\\\",\\\"systemUUID\\\":\\\"c40dcd55-9053-46a7-9f70-890f2d5d7520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:25Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.273692 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.273770 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.273784 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.273808 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.273823 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:25Z","lastTransitionTime":"2025-12-13T06:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:25 crc kubenswrapper[5048]: E1213 06:30:25.287106 5048 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6aaa1fdd-aec3-41cf-bed2-ae7f1a625255\\\",\\\"systemUUID\\\":\\\"c40dcd55-9053-46a7-9f70-890f2d5d7520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:25Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:25 crc kubenswrapper[5048]: E1213 06:30:25.287277 5048 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.289683 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.289733 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.289744 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.289762 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.289775 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:25Z","lastTransitionTime":"2025-12-13T06:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.392355 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.392443 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.392484 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.392509 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.392525 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:25Z","lastTransitionTime":"2025-12-13T06:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.495549 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.495607 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.495622 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.495638 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.495651 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:25Z","lastTransitionTime":"2025-12-13T06:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.565760 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:30:25 crc kubenswrapper[5048]: E1213 06:30:25.565978 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.598052 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.598104 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.598114 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.598134 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.598144 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:25Z","lastTransitionTime":"2025-12-13T06:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.701239 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.701298 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.701311 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.701330 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.701342 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:25Z","lastTransitionTime":"2025-12-13T06:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.803996 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.804076 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.804089 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.804106 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.804118 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:25Z","lastTransitionTime":"2025-12-13T06:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.907175 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.907241 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.907250 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.907264 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:25 crc kubenswrapper[5048]: I1213 06:30:25.907276 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:25Z","lastTransitionTime":"2025-12-13T06:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.010532 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.010597 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.010610 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.010634 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.010651 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:26Z","lastTransitionTime":"2025-12-13T06:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.113862 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.113903 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.113913 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.113932 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.113944 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:26Z","lastTransitionTime":"2025-12-13T06:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.216345 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.216397 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.216407 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.216427 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.216465 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:26Z","lastTransitionTime":"2025-12-13T06:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.319178 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.319237 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.319246 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.319267 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.319283 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:26Z","lastTransitionTime":"2025-12-13T06:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.422017 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.422096 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.422110 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.422134 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.422147 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:26Z","lastTransitionTime":"2025-12-13T06:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.526932 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.527281 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.527371 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.527486 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.527833 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:26Z","lastTransitionTime":"2025-12-13T06:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.565739 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.565775 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.565803 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:30:26 crc kubenswrapper[5048]: E1213 06:30:26.566243 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:30:26 crc kubenswrapper[5048]: E1213 06:30:26.566092 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:30:26 crc kubenswrapper[5048]: E1213 06:30:26.566350 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.577006 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:26Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.599080 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61afa
23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f01e17a58a23bf59a1f30f8f3e0fa84f9a8e3557f77b4aa2509403086fcd597c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f01e17a58a23bf59a1f30f8f3e0fa84f9a8e3557f77b4aa2509403086fcd597c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-13T06:30:11Z\\\",\\\"message\\\":\\\"e]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.213:80:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {2ead45b3-c313-4fbc-a7bc-2b3c4ffd610c}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1213 06:30:10.597757 6539 ovnkube.go:599] Stopped ovnkube\\\\nI1213 06:30:10.597818 6539 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1213 06:30:10.597880 6539 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator\\\\\\\"}\\\\nI1213 06:30:10.596663 6539 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/catalog-operator-metrics\\\\\\\"}\\\\nF1213 06:30:10.597931 6539 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for namespace Informer during admin network policy controller initialization, handler {0x1fcbf20 0x1fcbc00 0x1fcbba0} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start 
default node network controller: failed to set node crc annotati\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-hfgcf_openshift-ovn-kubernetes(caf986e7-b521-40fd-ae26-18716730d57d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"
kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:26Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.612351 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c880cc39f16d08a56ae903058580b30b83005a94239bc86b63ce2827d3e3338e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://63a4b5bf19a626d27b5ff5bbe0faac497702e18b0003790f0165d788dcc3326b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gdm7w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:26Z is after 2025-08-24T17:21:41Z" Dec 13 
06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.623379 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:26Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.633253 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.633325 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.633336 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.633359 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.633372 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:26Z","lastTransitionTime":"2025-12-13T06:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.641269 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:26Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.657539 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:26Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.672616 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:26Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.689116 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:26Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.711209 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e4911
7b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:26Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.725391 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:26Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.736060 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.736100 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.736114 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.736130 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.736141 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:26Z","lastTransitionTime":"2025-12-13T06:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.738311 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8d297a9-d6df-4f4c-bb78-7eff45697942\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bdca655a7c7ffd64ba59fb2ba574a7fef8ff566bcf1c75776b7cd93d734b694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6f921bd85ec5e159d59b8ee9e98026784f7ed189beea02e9f76fced30361160\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0db465510d0e1d5bf6f917f72d4b32608ab044e44bcf883bcfd9634d2d18ae4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719efa03a574deadefcdc3926b9aa9bf0d6f0f2ec72357b3cde3d17bf94eacd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://719efa03a574deadefcdc3926b9aa9bf0d6f0f2ec72357b3cde3d17bf94eacd0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:26Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.756115 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db7dc4c24b2f287638a86fcc16432cadbd96b03b50a7cb88b8b59a178a89001a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\
\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:57Z\\\",\\\"reason\\\":\\\"Comple
ted\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"las
tState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:26Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.784817 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tm62z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"226b24e2-92c6-43d1-a621-09702ffa8fd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tm62z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:26Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.806917 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:26Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.831190 5048 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049
b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:26Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.838422 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.838474 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.838483 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.838499 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.838509 5048 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:26Z","lastTransitionTime":"2025-12-13T06:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.848828 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:26Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.864933 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:26Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.884563 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:26Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.942515 5048 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.942571 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.942582 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.942599 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:26 crc kubenswrapper[5048]: I1213 06:30:26.942611 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:26Z","lastTransitionTime":"2025-12-13T06:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.044606 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.044647 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.044657 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.044675 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.044687 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:27Z","lastTransitionTime":"2025-12-13T06:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.148217 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.148952 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.148995 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.149017 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.149027 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:27Z","lastTransitionTime":"2025-12-13T06:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.252086 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.252152 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.252165 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.252184 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.252196 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:27Z","lastTransitionTime":"2025-12-13T06:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.354714 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.354845 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.354856 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.354875 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.354887 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:27Z","lastTransitionTime":"2025-12-13T06:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.457531 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.457887 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.457955 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.458077 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.458159 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:27Z","lastTransitionTime":"2025-12-13T06:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.561290 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.561324 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.561335 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.561351 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.561362 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:27Z","lastTransitionTime":"2025-12-13T06:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.565819 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z"
Dec 13 06:30:27 crc kubenswrapper[5048]: E1213 06:30:27.565954 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.663674 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.663741 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.663753 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.663780 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.663799 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:27Z","lastTransitionTime":"2025-12-13T06:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.766411 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.766460 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.766469 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.766482 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.766490 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:27Z","lastTransitionTime":"2025-12-13T06:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.869132 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.869173 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.869182 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.869196 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.869206 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:27Z","lastTransitionTime":"2025-12-13T06:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.971485 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.971526 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.971535 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.971554 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:27 crc kubenswrapper[5048]: I1213 06:30:27.971565 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:27Z","lastTransitionTime":"2025-12-13T06:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.074316 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.074346 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.074353 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.074366 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.074375 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:28Z","lastTransitionTime":"2025-12-13T06:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.176409 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.176742 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.176883 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.177007 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.177110 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:28Z","lastTransitionTime":"2025-12-13T06:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.280003 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.280041 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.280049 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.280063 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.280071 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:28Z","lastTransitionTime":"2025-12-13T06:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.382544 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.382606 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.382618 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.382639 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.382653 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:28Z","lastTransitionTime":"2025-12-13T06:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.485090 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.485120 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.485129 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.485141 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.485149 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:28Z","lastTransitionTime":"2025-12-13T06:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.567327 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 13 06:30:28 crc kubenswrapper[5048]: E1213 06:30:28.567514 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.567594 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.567628 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 13 06:30:28 crc kubenswrapper[5048]: E1213 06:30:28.567656 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 13 06:30:28 crc kubenswrapper[5048]: E1213 06:30:28.567739 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.569995 5048 scope.go:117] "RemoveContainer" containerID="f01e17a58a23bf59a1f30f8f3e0fa84f9a8e3557f77b4aa2509403086fcd597c"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.587320 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.587348 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.587356 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.587369 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.587377 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:28Z","lastTransitionTime":"2025-12-13T06:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.689539 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.689970 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.689984 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.690005 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.690016 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:28Z","lastTransitionTime":"2025-12-13T06:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.793523 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.793580 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.793594 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.793617 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.793631 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:28Z","lastTransitionTime":"2025-12-13T06:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.896807 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.896848 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.896863 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.896881 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.896891 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:28Z","lastTransitionTime":"2025-12-13T06:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.999646 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.999691 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.999701 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.999723 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:28 crc kubenswrapper[5048]: I1213 06:30:28.999734 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:28Z","lastTransitionTime":"2025-12-13T06:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.102935 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.102987 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.102996 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.103016 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.103031 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:29Z","lastTransitionTime":"2025-12-13T06:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.206314 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.206358 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.206366 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.206392 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.206409 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:29Z","lastTransitionTime":"2025-12-13T06:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.311081 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.311133 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.311146 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.311166 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.311185 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:29Z","lastTransitionTime":"2025-12-13T06:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.413865 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.413893 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.413901 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.413913 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.413922 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:29Z","lastTransitionTime":"2025-12-13T06:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.445281 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hfgcf_caf986e7-b521-40fd-ae26-18716730d57d/ovnkube-controller/1.log" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.448244 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" event={"ID":"caf986e7-b521-40fd-ae26-18716730d57d","Type":"ContainerStarted","Data":"1018805f5a3b3d4a892b9ec4836e2bcd7d56f8a089e1fa2767de4751fd554aca"} Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.448760 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.463484 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:29Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.477346 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:29Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.489480 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:29Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.507081 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:29Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.516047 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.516085 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.516097 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.516114 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.516125 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:29Z","lastTransitionTime":"2025-12-13T06:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.523981 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:29Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.545251 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269
019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"
,\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:29Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.558655 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:29Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.566093 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:30:29 crc kubenswrapper[5048]: E1213 06:30:29.566203 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.571645 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8d297a9-d6df-4f4c-bb78-7eff45697942\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bdca655a7c7ffd64ba59fb2ba574a7fef8ff566bcf1c75776b7cd93d734b694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6f921bd85ec5e159d59b8ee9e98026784f7ed189beea02e9f76fced30361160\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0db465510d0e1d5bf6f917f72d4b32608ab044e44bcf883bcfd9634d2d18ae4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\
\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719efa03a574deadefcdc3926b9aa9bf0d6f0f2ec72357b3cde3d17bf94eacd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://719efa03a574deadefcdc3926b9aa9bf0d6f0f2ec72357b3cde3d17bf94eacd0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:29Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.586483 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db7dc4c24b2f287638a86fcc16432cadbd96b03b50a7cb88b8b59a178a89001a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hos
tIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-12-13T06:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e543
19f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:29Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.597056 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tm62z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"226b24e2-92c6-43d1-a621-09702ffa8fd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tm62z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:29Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.606425 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:29Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.618796 5048 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.618850 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.618862 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.618877 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.618889 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:29Z","lastTransitionTime":"2025-12-13T06:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.620460 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controlle
r\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' 
detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:29Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.631849 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:29Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.645493 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:29Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.656918 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:29Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.668230 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:29Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.684983 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1018805f5a3b3d4a892b9ec4836e2bcd7d56f8a089e1fa2767de4751fd554aca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f01e17a58a23bf59a1f30f8f3e0fa84f9a8e3557f77b4aa2509403086fcd597c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-13T06:30:11Z\\\",\\\"message\\\":\\\"e]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.213:80:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {2ead45b3-c313-4fbc-a7bc-2b3c4ffd610c}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1213 06:30:10.597757 6539 ovnkube.go:599] Stopped ovnkube\\\\nI1213 06:30:10.597818 6539 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1213 06:30:10.597880 6539 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator\\\\\\\"}\\\\nI1213 06:30:10.596663 6539 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/catalog-operator-metrics\\\\\\\"}\\\\nF1213 06:30:10.597931 6539 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for namespace Informer during admin network policy controller initialization, handler {0x1fcbf20 0x1fcbc00 0x1fcbba0} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc 
annotati\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\
\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:29Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.694168 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c880cc39f16d08a56ae903058580b30b83005a94239bc86b63ce2827d3e3338e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://63a4b5bf19a626d27b5ff5bbe0faac497702e18b0003790f0165d788dcc3326b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gdm7w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:29Z is after 2025-08-24T17:21:41Z" Dec 13 
06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.722545 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.722593 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.722607 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.722624 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.722637 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:29Z","lastTransitionTime":"2025-12-13T06:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.825952 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.826001 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.826014 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.826032 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.826043 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:29Z","lastTransitionTime":"2025-12-13T06:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.930059 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.930396 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.930409 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.930457 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:29 crc kubenswrapper[5048]: I1213 06:30:29.930476 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:29Z","lastTransitionTime":"2025-12-13T06:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.033241 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.033276 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.033286 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.033301 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.033310 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:30Z","lastTransitionTime":"2025-12-13T06:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.136150 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.136235 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.136248 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.136268 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.136281 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:30Z","lastTransitionTime":"2025-12-13T06:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.239146 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.239209 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.239221 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.239239 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.239252 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:30Z","lastTransitionTime":"2025-12-13T06:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.341765 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.341811 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.341821 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.341834 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.341843 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:30Z","lastTransitionTime":"2025-12-13T06:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.444273 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.444338 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.444353 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.444370 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.444384 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:30Z","lastTransitionTime":"2025-12-13T06:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.451895 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hfgcf_caf986e7-b521-40fd-ae26-18716730d57d/ovnkube-controller/2.log" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.457094 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hfgcf_caf986e7-b521-40fd-ae26-18716730d57d/ovnkube-controller/1.log" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.460012 5048 generic.go:334] "Generic (PLEG): container finished" podID="caf986e7-b521-40fd-ae26-18716730d57d" containerID="1018805f5a3b3d4a892b9ec4836e2bcd7d56f8a089e1fa2767de4751fd554aca" exitCode=1 Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.460059 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" event={"ID":"caf986e7-b521-40fd-ae26-18716730d57d","Type":"ContainerDied","Data":"1018805f5a3b3d4a892b9ec4836e2bcd7d56f8a089e1fa2767de4751fd554aca"} Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.460098 5048 scope.go:117] "RemoveContainer" containerID="f01e17a58a23bf59a1f30f8f3e0fa84f9a8e3557f77b4aa2509403086fcd597c" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.460908 5048 scope.go:117] "RemoveContainer" containerID="1018805f5a3b3d4a892b9ec4836e2bcd7d56f8a089e1fa2767de4751fd554aca" Dec 13 06:30:30 crc kubenswrapper[5048]: E1213 06:30:30.461071 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-hfgcf_openshift-ovn-kubernetes(caf986e7-b521-40fd-ae26-18716730d57d)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" podUID="caf986e7-b521-40fd-ae26-18716730d57d" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.475356 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:30Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.486266 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:30Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.497153 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:30Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.519860 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1018805f5a3b3d4a892b9ec4836e2bcd7d56f8a089e1fa2767de4751fd554aca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f01e17a58a23bf59a1f30f8f3e0fa84f9a8e3557f77b4aa2509403086fcd597c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-13T06:30:11Z\\\",\\\"message\\\":\\\"e]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.213:80:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {2ead45b3-c313-4fbc-a7bc-2b3c4ffd610c}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1213 06:30:10.597757 6539 ovnkube.go:599] Stopped ovnkube\\\\nI1213 06:30:10.597818 6539 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1213 06:30:10.597880 6539 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator\\\\\\\"}\\\\nI1213 06:30:10.596663 6539 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/catalog-operator-metrics\\\\\\\"}\\\\nF1213 06:30:10.597931 6539 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for namespace Informer during admin network policy controller initialization, handler {0x1fcbf20 0x1fcbc00 0x1fcbba0} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotati\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1018805f5a3b3d4a892b9ec4836e2bcd7d56f8a089e1fa2767de4751fd554aca\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-13T06:30:29Z\\\",\\\"message\\\":\\\"06:30:29.506839 6780 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc after 0 failed attempt(s)\\\\nI1213 06:30:29.506847 6780 default_network_controller.go:776] Recording success event on pod openshift-kube-controller-manager/kube-controller-manager-crc\\\\nI1213 06:30:29.506799 6780 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-bdd78 in node crc\\\\nI1213 06:30:29.506858 6780 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-bdd78 after 0 failed attempt(s)\\\\nI1213 06:30:29.506859 6780 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nI1213 06:30:29.506862 6780 
default_network_controller.go:776] Recording success event on pod openshift-multus/multus-additional-cni-plugins-bdd78\\\\nI1213 06:30:29.506867 6780 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nI1213 06:30:29.506715 6780 lb_config.go:1031] Cluster endpoints for openshift-machine-api/machine-api-operator for network=default are: map[]\\\\nI1213 06:30:29.506875 6780 ovn.go:134] Ensuring zone local for Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf in node crc\\\\nI1213 06:30:29.506881 6780 obj_retry.go:386]\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name
\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:30Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.533044 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c880cc39f16d08a56ae903058580b30b83005a94239bc86b63ce2827d3e3338e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://63a4b5bf19a626d27b5ff5bbe0faac497702e18b0003790f0165d788dcc3326b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gdm7w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:30Z is after 2025-08-24T17:21:41Z" Dec 13 
06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.547295 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.547351 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.547363 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.547381 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.547393 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:30Z","lastTransitionTime":"2025-12-13T06:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.559710 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b9009
2272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\
":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:30Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.566471 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:30:30 crc kubenswrapper[5048]: E1213 06:30:30.566631 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.566858 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:30:30 crc kubenswrapper[5048]: E1213 06:30:30.566912 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.567263 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:30:30 crc kubenswrapper[5048]: E1213 06:30:30.567527 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.576583 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:30Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.592384 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8d297a9-d6df-4f4c-bb78-7eff45697942\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bdca655a7c7ffd64ba59fb2ba574a7fef8ff566bcf1c75776b7cd93d734b694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6f921bd85ec5e159d59b8ee9e98026784f7ed189beea02e9f76fced30361160\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0db465510d0e1d5bf6f917f72d4b32608ab044e44bcf883bcfd9634d2d18ae4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719efa03a574deadefcdc3926b9aa9bf0d6f0f2ec72357b3cde3d17bf94eacd0\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://719efa03a574deadefcdc3926b9aa9bf0d6f0f2ec72357b3cde3d17bf94eacd0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:30Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.608526 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:30Z is after 2025-08-24T17:21:41Z" Dec 13 
06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.624914 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:30Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.642534 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:30Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.650034 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.650079 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.650089 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.650104 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.650116 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:30Z","lastTransitionTime":"2025-12-13T06:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.656673 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:30Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.672679 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:30Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.691255 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db7dc4c24b2f287638a86fcc16432cadbd96b03b50a7cb88b8b59a178a89001a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-12-13T06:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-12-13T06:30:30Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.705893 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tm62z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"226b24e2-92c6-43d1-a621-09702ffa8fd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tm62z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:30Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.724948 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserve
r-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:30Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.738851 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:30Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.748738 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:30Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.752891 5048 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.752961 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.752973 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.752992 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.753005 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:30Z","lastTransitionTime":"2025-12-13T06:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.854953 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.854983 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.854990 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.855002 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.855011 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:30Z","lastTransitionTime":"2025-12-13T06:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.957851 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.957885 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.957894 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.957908 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:30 crc kubenswrapper[5048]: I1213 06:30:30.957917 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:30Z","lastTransitionTime":"2025-12-13T06:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.061243 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.061309 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.061323 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.061354 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.061374 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:31Z","lastTransitionTime":"2025-12-13T06:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.164652 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.164727 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.164740 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.164760 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.164772 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:31Z","lastTransitionTime":"2025-12-13T06:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.267610 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.267727 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.267743 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.267767 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.267781 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:31Z","lastTransitionTime":"2025-12-13T06:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.370913 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.370977 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.370989 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.371010 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.371026 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:31Z","lastTransitionTime":"2025-12-13T06:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.465996 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hfgcf_caf986e7-b521-40fd-ae26-18716730d57d/ovnkube-controller/2.log" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.470283 5048 scope.go:117] "RemoveContainer" containerID="1018805f5a3b3d4a892b9ec4836e2bcd7d56f8a089e1fa2767de4751fd554aca" Dec 13 06:30:31 crc kubenswrapper[5048]: E1213 06:30:31.470437 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-hfgcf_openshift-ovn-kubernetes(caf986e7-b521-40fd-ae26-18716730d57d)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" podUID="caf986e7-b521-40fd-ae26-18716730d57d" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.476928 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.477001 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.477016 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.477037 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.477055 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:31Z","lastTransitionTime":"2025-12-13T06:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.494928 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1018805f5a3b3d4a892b9ec4836e2bcd7d56f8a089e1fa2767de4751fd554aca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1018805f5a3b3d4a892b9ec4836e2bcd7d56f8a089e1fa2767de4751fd554aca\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-13T06:30:29Z\\\",\\\"message\\\":\\\"06:30:29.506839 6780 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc after 0 failed attempt(s)\\\\nI1213 06:30:29.506847 6780 default_network_controller.go:776] Recording success event on pod openshift-kube-controller-manager/kube-controller-manager-crc\\\\nI1213 06:30:29.506799 6780 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-bdd78 in node crc\\\\nI1213 06:30:29.506858 6780 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-bdd78 after 0 failed attempt(s)\\\\nI1213 06:30:29.506859 6780 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nI1213 06:30:29.506862 6780 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-additional-cni-plugins-bdd78\\\\nI1213 06:30:29.506867 6780 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nI1213 06:30:29.506715 6780 lb_config.go:1031] Cluster endpoints for openshift-machine-api/machine-api-operator for network=default are: map[]\\\\nI1213 06:30:29.506875 6780 ovn.go:134] Ensuring zone local for Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf in node crc\\\\nI1213 06:30:29.506881 6780 obj_retry.go:386]\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hfgcf_openshift-ovn-kubernetes(caf986e7-b521-40fd-ae26-18716730d57d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:31Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.505763 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c880cc39f16d08a56ae903058580b30b83005a94239bc86b63ce2827d3e3338e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://63a4b5bf19a626d27b5ff5bbe0faac497702e18b0003790f0165d788dcc3326b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gdm7w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:31Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.514626 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:31Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.527260 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:31Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.539488 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8d297a9-d6df-4f4c-bb78-7eff45697942\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bdca655a7c7ffd64ba59fb2ba574a7fef8ff566bcf1c75776b7cd93d734b694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6f921bd85ec5e159d59b8ee9e98026784f7ed189beea02e9f76fced30361160\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0db465510d0e1d5bf6f917f72d4b32608ab044e44bcf883bcfd9634d2d18ae4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719efa03a574deadefcdc3926b9aa9bf0d6f0f2ec72357b3cde3d17bf94eacd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://719efa03a574deadefcdc3926b9aa9bf0d6f0f2ec72357b3cde3d17bf94eacd0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:31Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.552740 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:31Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.568242 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:30:31 crc kubenswrapper[5048]: E1213 06:30:31.568416 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.569067 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:31Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.581610 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.581649 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.581658 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.581673 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.581683 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:31Z","lastTransitionTime":"2025-12-13T06:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.584821 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:31Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.584919 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.602964 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:31Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.617284 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:31Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.640290 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:31Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.653329 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tm62z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"226b24e2-92c6-43d1-a621-09702ffa8fd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tm62z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:31Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.669605 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db7dc4c24b2f287638a86fcc16432cadbd96b03b50a7cb88b8b59a178a89001a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:31Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.684299 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:31Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.684363 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.684392 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.684401 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.684415 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.684426 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:31Z","lastTransitionTime":"2025-12-13T06:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.696925 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:31Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.711498 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z
\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:31Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.727045 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:31Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.742223 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:31Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.786926 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.787009 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.787025 5048 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.787044 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.787057 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:31Z","lastTransitionTime":"2025-12-13T06:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.890151 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.890214 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.890224 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.890285 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.890299 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:31Z","lastTransitionTime":"2025-12-13T06:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.993489 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.993548 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.993563 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.993582 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:31 crc kubenswrapper[5048]: I1213 06:30:31.993594 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:31Z","lastTransitionTime":"2025-12-13T06:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.095882 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.095935 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.095947 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.095969 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.095980 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:32Z","lastTransitionTime":"2025-12-13T06:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.198709 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.198780 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.198805 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.198840 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.198857 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:32Z","lastTransitionTime":"2025-12-13T06:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.301891 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.301950 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.301963 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.301985 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.302001 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:32Z","lastTransitionTime":"2025-12-13T06:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
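[Editor's note] For reference, the body the kubelet tried to send in each "failed to patch status" entry further above is a strategic merge patch against the pod's status subresource; the $setElementOrder/conditions directive pins the order of the merged conditions list. A minimal sketch reconstructing that patch shape from the node-resolver-kkfct entry (uid and timestamp copied from the log; the surrounding program is illustrative, not kubelet status-manager code):

// patchshape.go: sketch of the strategic-merge-patch body seen in the log.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	patch := map[string]any{
		"metadata": map[string]any{
			// uid copied from the node-resolver-kkfct entry above
			"uid": "262dc4a8-4ed0-49a1-be9e-52071ce3b6b7",
		},
		"status": map[string]any{
			// Fixes ordering of the merged list during the patch.
			"$setElementOrder/conditions": []map[string]string{
				{"type": "PodReadyToStartContainers"},
				{"type": "Initialized"},
				{"type": "Ready"},
				{"type": "ContainersReady"},
				{"type": "PodScheduled"},
			},
			// One condition shown; the real patch carries several.
			"conditions": []map[string]any{
				{"type": "Ready", "status": "True", "lastProbeTime": nil,
					"lastTransitionTime": "2025-12-13T06:29:51Z"},
			},
		},
	}
	out, err := json.MarshalIndent(patch, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}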
Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.404666 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.404723 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.404732 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.404759 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.404773 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:32Z","lastTransitionTime":"2025-12-13T06:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.506961 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.507006 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.507017 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.507032 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.507044 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:32Z","lastTransitionTime":"2025-12-13T06:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.565883 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.565930 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.565959 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 13 06:30:32 crc kubenswrapper[5048]: E1213 06:30:32.566042 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
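[Editor's note] Every "network is not ready" entry repeats the same readiness failure: nothing is installed under /etc/kubernetes/cni/net.d/. A rough sketch of that directory scan is below; the real check is performed by the container runtime via libcni, and the extension list here is an assumption for illustration only.

// cnicheck.go: approximate the "no CNI configuration file" readiness test.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	confDir := "/etc/kubernetes/cni/net.d" // directory named in the log message
	var found []string
	// Assumed extension set; libcni enumerates conf/conflist/json files.
	for _, pattern := range []string{"*.conf", "*.conflist", "*.json"} {
		matches, err := filepath.Glob(filepath.Join(confDir, pattern))
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		found = append(found, matches...)
	}
	if len(found) == 0 {
		// The state the kubelet reports as NetworkReady=false above.
		fmt.Println("no CNI configuration file found; network plugin not ready")
		return
	}
	fmt.Println("CNI configurations present:", found)
}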
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:30:32 crc kubenswrapper[5048]: E1213 06:30:32.566036 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:30:32 crc kubenswrapper[5048]: E1213 06:30:32.566149 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.609360 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.609402 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.609417 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.609437 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.609475 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:32Z","lastTransitionTime":"2025-12-13T06:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.712494 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.712545 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.712560 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.712578 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.712590 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:32Z","lastTransitionTime":"2025-12-13T06:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.815192 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.815286 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.815300 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.815315 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.815326 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:32Z","lastTransitionTime":"2025-12-13T06:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.917328 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.917378 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.917390 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.917407 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:32 crc kubenswrapper[5048]: I1213 06:30:32.917418 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:32Z","lastTransitionTime":"2025-12-13T06:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.020331 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.020386 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.020397 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.020414 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.020423 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:33Z","lastTransitionTime":"2025-12-13T06:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.123289 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.123339 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.123350 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.123365 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.123376 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:33Z","lastTransitionTime":"2025-12-13T06:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.226280 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.226330 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.226342 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.226359 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.226371 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:33Z","lastTransitionTime":"2025-12-13T06:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.328842 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.328875 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.328882 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.328895 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.328903 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:33Z","lastTransitionTime":"2025-12-13T06:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.431199 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.431276 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.431288 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.431303 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.431313 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:33Z","lastTransitionTime":"2025-12-13T06:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.534174 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.534642 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.534670 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.534694 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.534709 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:33Z","lastTransitionTime":"2025-12-13T06:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.566690 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z"
Dec 13 06:30:33 crc kubenswrapper[5048]: E1213 06:30:33.566917 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4"
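[Editor's note] The condition={...} payload that setters.go:603 prints in each "Node became not ready" entry is the node's Ready condition. A minimal sketch of its JSON shape, using a plain struct instead of the real k8s.io/api types (field names and values copied from the log; the program itself is illustrative):

// readycond.go: reproduce the Ready condition JSON seen in the log.
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// NodeCondition mirrors the JSON keys in the condition={...} payload above.
type NodeCondition struct {
	Type               string    `json:"type"`
	Status             string    `json:"status"`
	LastHeartbeatTime  time.Time `json:"lastHeartbeatTime"`
	LastTransitionTime time.Time `json:"lastTransitionTime"`
	Reason             string    `json:"reason"`
	Message            string    `json:"message"`
}

func main() {
	ts := time.Date(2025, 12, 13, 6, 30, 33, 0, time.UTC)
	cond := NodeCondition{
		Type:               "Ready",
		Status:             "False",
		LastHeartbeatTime:  ts,
		LastTransitionTime: ts,
		Reason:             "KubeletNotReady",
		Message: "container runtime network not ready: NetworkReady=false " +
			"reason:NetworkPluginNotReady message:Network plugin returns error: " +
			"no CNI configuration file in /etc/kubernetes/cni/net.d/. " +
			"Has your network provider started?",
	}
	out, err := json.Marshal(cond)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // matches the condition={...} line format
}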
pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.638681 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.638755 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.638769 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.638795 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.638811 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:33Z","lastTransitionTime":"2025-12-13T06:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.742408 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.742517 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.742531 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.742556 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.742571 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:33Z","lastTransitionTime":"2025-12-13T06:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.845729 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.845808 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.845819 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.845843 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.845855 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:33Z","lastTransitionTime":"2025-12-13T06:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.948471 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.948518 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.948531 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.948560 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:33 crc kubenswrapper[5048]: I1213 06:30:33.948579 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:33Z","lastTransitionTime":"2025-12-13T06:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.051079 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.051129 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.051141 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.051158 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.051169 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:34Z","lastTransitionTime":"2025-12-13T06:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.154332 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.154548 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.154563 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.154584 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.154784 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:34Z","lastTransitionTime":"2025-12-13T06:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.257184 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.257243 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.257255 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.257271 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.257284 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:34Z","lastTransitionTime":"2025-12-13T06:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.359537 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.359580 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.359592 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.359609 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.359620 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:34Z","lastTransitionTime":"2025-12-13T06:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.462204 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.462585 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.462696 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.462901 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.463029 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:34Z","lastTransitionTime":"2025-12-13T06:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.565829 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.565899 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.565829 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:30:34 crc kubenswrapper[5048]: E1213 06:30:34.566107 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:30:34 crc kubenswrapper[5048]: E1213 06:30:34.566155 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:30:34 crc kubenswrapper[5048]: E1213 06:30:34.566309 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.566676 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.566787 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.566881 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.566977 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.567060 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:34Z","lastTransitionTime":"2025-12-13T06:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.669576 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.669629 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.669641 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.669683 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.669695 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:34Z","lastTransitionTime":"2025-12-13T06:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.771978 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.772019 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.772028 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.772041 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.772051 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:34Z","lastTransitionTime":"2025-12-13T06:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.874505 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.874860 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.874933 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.875022 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.875120 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:34Z","lastTransitionTime":"2025-12-13T06:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.978175 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.978222 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.978232 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.978249 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:34 crc kubenswrapper[5048]: I1213 06:30:34.978263 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:34Z","lastTransitionTime":"2025-12-13T06:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.080578 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.081110 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.081189 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.081262 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.081330 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:35Z","lastTransitionTime":"2025-12-13T06:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.184165 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.184204 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.184219 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.184236 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.184248 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:35Z","lastTransitionTime":"2025-12-13T06:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.286906 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.286955 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.286964 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.286979 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.286988 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:35Z","lastTransitionTime":"2025-12-13T06:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.389216 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.389259 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.389268 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.389281 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.389290 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:35Z","lastTransitionTime":"2025-12-13T06:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.427004 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.427046 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.427054 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.427072 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.427086 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:35Z","lastTransitionTime":"2025-12-13T06:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:35 crc kubenswrapper[5048]: E1213 06:30:35.440828 5048 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6aaa1fdd-aec3-41cf-bed2-ae7f1a625255\\\",\\\"systemUUID\\\":\\\"c40dcd55-9053-46a7-9f70-890f2d5d7520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:35Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.444376 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.444408 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.444418 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.444431 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.444457 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:35Z","lastTransitionTime":"2025-12-13T06:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:35 crc kubenswrapper[5048]: E1213 06:30:35.455886 5048 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Dec 13 06:30:35 crc kubenswrapper[5048]: E1213 06:30:35.506716 5048 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.508208 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
event="NodeHasSufficientMemory" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.508258 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.508274 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.508292 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.508306 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:35Z","lastTransitionTime":"2025-12-13T06:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.565740 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:30:35 crc kubenswrapper[5048]: E1213 06:30:35.565923 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.611091 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.611126 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.611137 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.611152 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.611162 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:35Z","lastTransitionTime":"2025-12-13T06:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.713861 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.713900 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.713922 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.713938 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.713948 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:35Z","lastTransitionTime":"2025-12-13T06:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.816376 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.816421 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.816450 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.816465 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.816476 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:35Z","lastTransitionTime":"2025-12-13T06:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.919772 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.919819 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.919827 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.919841 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:35 crc kubenswrapper[5048]: I1213 06:30:35.919849 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:35Z","lastTransitionTime":"2025-12-13T06:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.021930 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.021977 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.021989 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.022006 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.022020 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:36Z","lastTransitionTime":"2025-12-13T06:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.124203 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.124239 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.124249 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.124264 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.124276 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:36Z","lastTransitionTime":"2025-12-13T06:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.227090 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.227464 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.227475 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.227488 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.227497 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:36Z","lastTransitionTime":"2025-12-13T06:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.329623 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.329656 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.329666 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.329679 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.329691 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:36Z","lastTransitionTime":"2025-12-13T06:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.432359 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.432388 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.432399 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.432414 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.432425 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:36Z","lastTransitionTime":"2025-12-13T06:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.534369 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.534519 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.534530 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.534545 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.534555 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:36Z","lastTransitionTime":"2025-12-13T06:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.565854 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:30:36 crc kubenswrapper[5048]: E1213 06:30:36.566224 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.565976 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:30:36 crc kubenswrapper[5048]: E1213 06:30:36.566543 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.565862 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:30:36 crc kubenswrapper[5048]: E1213 06:30:36.566778 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.577723 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:36Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.593913 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:36Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.603771 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"600ac0db-9d2a-49c8-b58b-78f6f3ed60e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3f8010716141c431c87894a8a22182ae06fd0671ad4bd6988888756f027a240d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2144a7f51ff3f41cfe760f2939d1b2d31ef9c9e4863fe8b26eaeda22a2f5ee23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2144a7f51ff3f41cfe760f2939d1b2d31ef9c9e4863fe8b26eaeda22a2f5ee23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:36Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.616151 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:36Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.628987 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:36Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.637001 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.637043 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.637052 5048 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.637067 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.637076 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:36Z","lastTransitionTime":"2025-12-13T06:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.641245 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:36Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.652238 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:36Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.672505 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1018805f5a3b3d4a892b9ec4836e2bcd7d56f8a089e1fa2767de4751fd554aca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1018805f5a3b3d4a892b9ec4836e2bcd7d56f8a089e1fa2767de4751fd554aca\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-13T06:30:29Z\\\",\\\"message\\\":\\\"06:30:29.506839 6780 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc after 0 failed attempt(s)\\\\nI1213 06:30:29.506847 6780 default_network_controller.go:776] Recording success event on pod openshift-kube-controller-manager/kube-controller-manager-crc\\\\nI1213 06:30:29.506799 6780 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-bdd78 in node crc\\\\nI1213 06:30:29.506858 6780 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-bdd78 after 0 failed attempt(s)\\\\nI1213 06:30:29.506859 6780 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nI1213 06:30:29.506862 6780 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-additional-cni-plugins-bdd78\\\\nI1213 06:30:29.506867 6780 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nI1213 06:30:29.506715 6780 lb_config.go:1031] Cluster endpoints for openshift-machine-api/machine-api-operator for network=default are: map[]\\\\nI1213 06:30:29.506875 6780 ovn.go:134] Ensuring zone local for Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf in node crc\\\\nI1213 06:30:29.506881 6780 obj_retry.go:386]\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hfgcf_openshift-ovn-kubernetes(caf986e7-b521-40fd-ae26-18716730d57d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:36Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.691306 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c880cc39f16d08a56ae903058580b30b83005a94239bc86b63ce2827d3e3338e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://63a4b5bf19a626d27b5ff5bbe0faac497702e18b0003790f0165d788dcc3326b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gdm7w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:36Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.706378 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:36Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.722629 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:36Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.735526 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:36Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.738805 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.738827 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.738834 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.738846 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.738854 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:36Z","lastTransitionTime":"2025-12-13T06:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
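[Annotation] Every status-patch failure above ends in the same TLS error: the serving certificate behind the pod.network-node-identity.openshift.io webhook at 127.0.0.1:9743 expired on 2025-08-24T17:21:41Z, long before the node's current clock of 2025-12-13T06:30:36Z. The wording comes from the standard certificate validity-window comparison; a minimal Go sketch reproduces that check (the PEM-path argument is hypothetical, for illustration only):

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"log"
	"os"
	"time"
)

func main() {
	if len(os.Args) < 2 {
		log.Fatal("usage: certwindow <cert.pem>")
	}
	pemBytes, err := os.ReadFile(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil || block.Type != "CERTIFICATE" {
		log.Fatal("no CERTIFICATE block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		log.Fatal(err)
	}
	// Same validity-window comparison Go's TLS verifier applies during
	// the handshake: reject when now < NotBefore or now > NotAfter.
	now := time.Now()
	switch {
	case now.Before(cert.NotBefore):
		fmt.Printf("certificate is not yet valid: current time %s is before %s\n",
			now.Format(time.RFC3339), cert.NotBefore.Format(time.RFC3339))
	case now.After(cert.NotAfter):
		fmt.Printf("certificate has expired: current time %s is after %s\n",
			now.Format(time.RFC3339), cert.NotAfter.Format(time.RFC3339))
	default:
		fmt.Printf("certificate valid until %s\n", cert.NotAfter.Format(time.RFC3339))
	}
}

Until that certificate is rotated, kubelet keeps pod status locally but every PATCH to the API server keeps bouncing off the failing webhook, which is why the same error repeats for every pod in this excerpt.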
Has your network provider started?"} Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.747857 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:36Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.760408 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:36Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.781110 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:36Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.793197 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:36Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.829705 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8d297a9-d6df-4f4c-bb78-7eff45697942\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bdca655a7c7ffd64ba59fb2ba574a7fef8ff566bcf1c75776b7cd93d734b694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6f921bd85ec5e159d59b8ee9e98026784f7ed189beea02e9f76fced30361160\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0db465510d0e1d5bf6f917f72d4b32608ab044e44bcf883bcfd9634d2d18ae4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719efa03a574deadefcdc3926b9aa9bf0d6f0f2ec72357b3cde3d17bf94eacd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://719efa03a574deadefcdc3926b9aa9bf0d6f0f2ec72357b3cde3d17bf94eacd0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:36Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.841178 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.841205 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.841214 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.841227 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 
06:30:36.841235 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:36Z","lastTransitionTime":"2025-12-13T06:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.843729 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db7dc4c24b2f287638a86fcc16432cadbd96b03b50a7cb88b8b59a178a89001a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mou
ntPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCou
nt\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:36Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.853901 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tm62z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"226b24e2-92c6-43d1-a621-09702ffa8fd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tm62z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:36Z is after 2025-08-24T17:21:41Z" Dec 13 
06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.943386 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.943459 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.943469 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.943485 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:36 crc kubenswrapper[5048]: I1213 06:30:36.943498 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:36Z","lastTransitionTime":"2025-12-13T06:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.028101 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/226b24e2-92c6-43d1-a621-09702ffa8fd4-metrics-certs\") pod \"network-metrics-daemon-tm62z\" (UID: \"226b24e2-92c6-43d1-a621-09702ffa8fd4\") " pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:30:37 crc kubenswrapper[5048]: E1213 06:30:37.028232 5048 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 13 06:30:37 crc kubenswrapper[5048]: E1213 06:30:37.028281 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/226b24e2-92c6-43d1-a621-09702ffa8fd4-metrics-certs podName:226b24e2-92c6-43d1-a621-09702ffa8fd4 nodeName:}" failed. No retries permitted until 2025-12-13 06:31:09.028268187 +0000 UTC m=+102.894862768 (durationBeforeRetry 32s). 
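[Annotation] The "No retries permitted until ... (durationBeforeRetry 32s)" record shows the volume manager's per-operation exponential backoff at work: the retry time 06:31:09.028 is exactly 32s after the failure at 06:30:37.028. A sketch of that schedule follows; the constants mirror kubelet's goroutinemap backoff (initial 500ms, doubling, capped) but should be treated as assumptions, not values verified against this cluster's kubelet version:

package main

import (
	"fmt"
	"time"
)

// Assumed backoff constants (see lead-in above).
const (
	initialDurationBeforeRetry = 500 * time.Millisecond
	maxDurationBeforeRetry     = 2*time.Minute + 2*time.Second
)

// durationBeforeRetry returns the delay after the given number of
// consecutive failures of one volume operation.
func durationBeforeRetry(failures int) time.Duration {
	d := initialDurationBeforeRetry
	for i := 1; i < failures; i++ {
		d *= 2
		if d > maxDurationBeforeRetry {
			return maxDurationBeforeRetry
		}
	}
	return d
}

func main() {
	// By the seventh straight MountVolume failure the delay reaches 32s,
	// matching "durationBeforeRetry 32s" in the record above.
	for n := 1; n <= 8; n++ {
		fmt.Printf("failure %d -> retry in %s\n", n, durationBeforeRetry(n))
	}
}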
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/226b24e2-92c6-43d1-a621-09702ffa8fd4-metrics-certs") pod "network-metrics-daemon-tm62z" (UID: "226b24e2-92c6-43d1-a621-09702ffa8fd4") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.045257 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.045298 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.045308 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.045322 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.045331 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:37Z","lastTransitionTime":"2025-12-13T06:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.147893 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.147966 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.147979 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.148003 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.148016 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:37Z","lastTransitionTime":"2025-12-13T06:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
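[Annotation] The recurring NotReady condition bottoms out in a plain filesystem probe: the runtime looks for a CNI config under /etc/kubernetes/cni/net.d/ and finds nothing, because ovnkube-node is crash-looping (see the CrashLoopBackOff status at the top of this excerpt) and never writes one. A stand-alone Go sketch of that probe; the extension list mirrors what CNI config loaders typically accept and is stated here as an assumption:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Directory taken verbatim from the kubelet message above.
	confDir := "/etc/kubernetes/cni/net.d"
	var found []string
	for _, pattern := range []string{"*.conf", "*.conflist", "*.json"} {
		matches, err := filepath.Glob(filepath.Join(confDir, pattern))
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		found = append(found, matches...)
	}
	if len(found) == 0 {
		// The empty result is what the runtime reports upward as
		// NetworkReady=false / NetworkPluginNotReady.
		fmt.Println("no CNI configuration file in", confDir)
		return
	}
	for _, f := range found {
		fmt.Println("found CNI config:", f)
	}
}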
Has your network provider started?"} Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.250857 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.250900 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.250908 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.250923 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.250932 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:37Z","lastTransitionTime":"2025-12-13T06:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.354003 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.354067 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.354092 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.354121 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.354142 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:37Z","lastTransitionTime":"2025-12-13T06:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.456178 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.456214 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.456226 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.456241 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.456251 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:37Z","lastTransitionTime":"2025-12-13T06:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.559085 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.559136 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.559149 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.559169 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.559185 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:37Z","lastTransitionTime":"2025-12-13T06:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.566339 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:30:37 crc kubenswrapper[5048]: E1213 06:30:37.566504 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.661693 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.661780 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.661796 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.661821 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.661842 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:37Z","lastTransitionTime":"2025-12-13T06:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.764489 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.764605 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.764618 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.764685 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.764696 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:37Z","lastTransitionTime":"2025-12-13T06:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.867427 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.867490 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.867503 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.867519 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.867531 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:37Z","lastTransitionTime":"2025-12-13T06:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.970351 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.970414 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.970425 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.970468 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:37 crc kubenswrapper[5048]: I1213 06:30:37.970485 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:37Z","lastTransitionTime":"2025-12-13T06:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.072764 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.072800 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.072809 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.072832 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.072847 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:38Z","lastTransitionTime":"2025-12-13T06:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.174968 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.175012 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.175020 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.175034 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.175042 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:38Z","lastTransitionTime":"2025-12-13T06:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.277864 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.277923 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.277935 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.277949 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.277960 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:38Z","lastTransitionTime":"2025-12-13T06:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.380837 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.380883 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.380893 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.380908 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.380918 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:38Z","lastTransitionTime":"2025-12-13T06:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.483449 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.483500 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.483512 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.483528 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.483539 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:38Z","lastTransitionTime":"2025-12-13T06:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.565895 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.565990 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.565922 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:30:38 crc kubenswrapper[5048]: E1213 06:30:38.566053 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:30:38 crc kubenswrapper[5048]: E1213 06:30:38.566132 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:30:38 crc kubenswrapper[5048]: E1213 06:30:38.566242 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.586304 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.586338 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.586352 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.586367 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.586377 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:38Z","lastTransitionTime":"2025-12-13T06:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.688393 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.688491 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.688504 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.688521 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.688533 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:38Z","lastTransitionTime":"2025-12-13T06:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.790629 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.790680 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.790693 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.790711 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.790723 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:38Z","lastTransitionTime":"2025-12-13T06:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.892714 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.892762 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.892772 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.892788 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.892797 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:38Z","lastTransitionTime":"2025-12-13T06:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.995703 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.995751 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.995760 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.995773 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:38 crc kubenswrapper[5048]: I1213 06:30:38.995783 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:38Z","lastTransitionTime":"2025-12-13T06:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.099894 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.099965 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.099987 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.100015 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.100038 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:39Z","lastTransitionTime":"2025-12-13T06:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.202306 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.202344 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.202357 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.202373 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.202385 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:39Z","lastTransitionTime":"2025-12-13T06:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.304763 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.304863 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.304893 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.304925 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.304947 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:39Z","lastTransitionTime":"2025-12-13T06:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.407311 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.407362 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.407374 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.407393 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.407411 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:39Z","lastTransitionTime":"2025-12-13T06:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.509630 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.509698 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.509708 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.509728 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.509743 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:39Z","lastTransitionTime":"2025-12-13T06:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.566158 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:30:39 crc kubenswrapper[5048]: E1213 06:30:39.566320 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.613021 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.613072 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.613087 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.613107 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.613120 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:39Z","lastTransitionTime":"2025-12-13T06:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.717038 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.717101 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.717114 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.717138 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.717153 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:39Z","lastTransitionTime":"2025-12-13T06:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.819336 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.819384 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.819392 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.819407 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.819416 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:39Z","lastTransitionTime":"2025-12-13T06:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.921717 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.921757 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.921765 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.921781 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:39 crc kubenswrapper[5048]: I1213 06:30:39.921790 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:39Z","lastTransitionTime":"2025-12-13T06:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.024111 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.024153 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.024162 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.024176 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.024186 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:40Z","lastTransitionTime":"2025-12-13T06:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.127060 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.127154 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.127167 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.127185 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.127197 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:40Z","lastTransitionTime":"2025-12-13T06:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.229906 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.229960 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.229971 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.229990 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.230000 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:40Z","lastTransitionTime":"2025-12-13T06:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.333471 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.333527 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.333541 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.333558 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.333574 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:40Z","lastTransitionTime":"2025-12-13T06:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.438089 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.438158 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.438169 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.438186 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.438196 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:40Z","lastTransitionTime":"2025-12-13T06:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.501396 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-r42c6_627477f3-8fca-4b40-ace9-68d22f6b8576/kube-multus/0.log" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.501498 5048 generic.go:334] "Generic (PLEG): container finished" podID="627477f3-8fca-4b40-ace9-68d22f6b8576" containerID="c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a" exitCode=1 Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.501534 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-r42c6" event={"ID":"627477f3-8fca-4b40-ace9-68d22f6b8576","Type":"ContainerDied","Data":"c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a"} Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.501982 5048 scope.go:117] "RemoveContainer" containerID="c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.517014 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c880cc39f16d08a56ae903058580b30b83005a94239bc86b63ce2827d3e3338e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://63a4b5bf19a626d27b5ff5bbe0faac497702e18b0003790f0165d788dcc3326b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gdm7w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:40Z is after 2025-08-24T17:21:41Z" Dec 13 
06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.529952 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:40Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.542173 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.542223 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.542236 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.542255 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.542271 5048 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:40Z","lastTransitionTime":"2025-12-13T06:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.556485 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\
":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access
-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1018805f5a3b3d4a892b9ec4836e2bcd7d56f8a089e1fa2767de4751fd554aca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1018805f5a3b3d4a892b9ec4836e2bcd7d56f8a089e1fa2767de4751fd554aca\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-13T06:30:29Z\\\",\\\"message\\\":\\\"06:30:29.506839 6780 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc after 0 failed attempt(s)\\\\nI1213 06:30:29.506847 6780 default_network_controller.go:776] Recording success event on pod openshift-kube-controller-manager/kube-controller-manager-crc\\\\nI1213 06:30:29.506799 6780 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-bdd78 in node crc\\\\nI1213 06:30:29.506858 6780 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-bdd78 after 0 failed attempt(s)\\\\nI1213 06:30:29.506859 6780 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nI1213 06:30:29.506862 6780 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-additional-cni-plugins-bdd78\\\\nI1213 06:30:29.506867 6780 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nI1213 06:30:29.506715 6780 lb_config.go:1031] Cluster endpoints for openshift-machine-api/machine-api-operator for network=default are: map[]\\\\nI1213 06:30:29.506875 6780 ovn.go:134] Ensuring zone local for Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf in node crc\\\\nI1213 06:30:29.506881 6780 
obj_retry.go:386]\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-hfgcf_openshift-ovn-kubernetes(caf986e7-b521-40fd-ae26-18716730d57d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true
,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:40Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.566774 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.566796 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.566929 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:30:40 crc kubenswrapper[5048]: E1213 06:30:40.567090 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:30:40 crc kubenswrapper[5048]: E1213 06:30:40.567178 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:30:40 crc kubenswrapper[5048]: E1213 06:30:40.567301 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.569483 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8d297a9-d6df-4f4c-bb78-7eff45697942\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bdca655a7c7ffd64ba59fb2ba574a7fef8ff566bcf1c75776b7cd93d734b694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6f921bd85ec5e159d59b8ee9e98026784f7ed189beea02e9f76fced30361160\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0db465510d0e1d5bf6f917f72d4b32608ab044e44bcf883bcfd9634d2d18ae4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"ima
geID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719efa03a574deadefcdc3926b9aa9bf0d6f0f2ec72357b3cde3d17bf94eacd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://719efa03a574deadefcdc3926b9aa9bf0d6f0f2ec72357b3cde3d17bf94eacd0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:40Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.580982 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:40Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.594353 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:40Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.606077 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:40Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.620568 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:40Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.635471 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-13T06:30:40Z\\\",\\\"message\\\":\\\"2025-12-13T06:29:55+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_b318d8f2-ad45-42da-a7a4-494c1a62c42d\\\\n2025-12-13T06:29:55+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_b318d8f2-ad45-42da-a7a4-494c1a62c42d to /host/opt/cni/bin/\\\\n2025-12-13T06:29:55Z [verbose] multus-daemon started\\\\n2025-12-13T06:29:55Z [verbose] Readiness Indicator file check\\\\n2025-12-13T06:30:40Z [error] have you checked that your 
default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:40Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.645231 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.645281 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.645307 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.645331 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.645347 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:40Z","lastTransitionTime":"2025-12-13T06:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.660158 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":t
rue,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"cont
ainerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:40Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.671088 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:40Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.685008 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db7dc4c24b2f287638a86fcc16432cadbd96b03b50a7cb88b8b59a178a89001a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091
a31115835ab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-b
inary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"termin
ated\\\":{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:40Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.694039 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tm62z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"226b24e2-92c6-43d1-a621-09702ffa8fd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tm62z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:40Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.703907 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:40Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.713778 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:40Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.725487 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6
355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 
06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:40Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.736558 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"600ac0db-9d2a-49c8-b58b-78f6f3ed60e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3f8010716141c431c87894a8a22182ae06fd0671ad4bd6988888756f027a240d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2144a7f51ff3f41cfe760f2939d1b2d31ef9c9e4863fe8b26eaeda22a2f5ee23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2144a7f51ff3f41cfe760f2939d1b2d31ef9c9e4863fe8b26eaeda22a2f5ee23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:40Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.748324 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:40Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.748944 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.748984 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.748992 5048 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.749006 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.749032 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:40Z","lastTransitionTime":"2025-12-13T06:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.760684 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:40Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.851936 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:40 
crc kubenswrapper[5048]: I1213 06:30:40.851984 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.852001 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.852021 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.852034 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:40Z","lastTransitionTime":"2025-12-13T06:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.954283 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.954321 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.954329 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.954344 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:40 crc kubenswrapper[5048]: I1213 06:30:40.954352 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:40Z","lastTransitionTime":"2025-12-13T06:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.057250 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.057294 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.057302 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.057336 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.057346 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:41Z","lastTransitionTime":"2025-12-13T06:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.159955 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.160017 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.160034 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.160054 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.160068 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:41Z","lastTransitionTime":"2025-12-13T06:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.262159 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.262211 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.262222 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.262239 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.262252 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:41Z","lastTransitionTime":"2025-12-13T06:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.364098 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.364133 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.364142 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.364154 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.364162 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:41Z","lastTransitionTime":"2025-12-13T06:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.466632 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.466678 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.466697 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.466716 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.466727 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:41Z","lastTransitionTime":"2025-12-13T06:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.506352 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-r42c6_627477f3-8fca-4b40-ace9-68d22f6b8576/kube-multus/0.log" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.506423 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-r42c6" event={"ID":"627477f3-8fca-4b40-ace9-68d22f6b8576","Type":"ContainerStarted","Data":"29eacc23b1b0315e6101fa8981ce61752594e1488b26a1dfba5310a6893d0a9e"} Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.523744 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:41Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.537365 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"600ac0db-9d2a-49c8-b58b-78f6f3ed60e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3f8010716141c431c87894a8a22182ae06fd0671ad4bd6988888756f027a240d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2144a7f51ff3f41cfe760f2939d1b2d31ef9c9e4863fe8b26eaeda22a2f5ee23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2144a7f51ff3f41cfe760f2939d1b2d31ef9c9e4863fe8b26eaeda22a2f5ee23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:41Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.550799 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:41Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.563076 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:41Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.566616 5048 util.go:30] "No sandbox for pod can 
be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:30:41 crc kubenswrapper[5048]: E1213 06:30:41.566775 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.568813 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.568857 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.568870 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.568885 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.568896 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:41Z","lastTransitionTime":"2025-12-13T06:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.578817 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:41Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.589618 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:41Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.599949 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:41Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.616019 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1018805f5a3b3d4a892b9ec4836e2bcd7d56f8a089e1fa2767de4751fd554aca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1018805f5a3b3d4a892b9ec4836e2bcd7d56f8a089e1fa2767de4751fd554aca\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-13T06:30:29Z\\\",\\\"message\\\":\\\"06:30:29.506839 6780 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc after 0 failed attempt(s)\\\\nI1213 06:30:29.506847 6780 default_network_controller.go:776] Recording success event on pod openshift-kube-controller-manager/kube-controller-manager-crc\\\\nI1213 06:30:29.506799 6780 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-bdd78 in node crc\\\\nI1213 06:30:29.506858 6780 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-bdd78 after 0 failed attempt(s)\\\\nI1213 06:30:29.506859 6780 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nI1213 06:30:29.506862 6780 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-additional-cni-plugins-bdd78\\\\nI1213 06:30:29.506867 6780 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nI1213 06:30:29.506715 6780 lb_config.go:1031] Cluster endpoints for openshift-machine-api/machine-api-operator for network=default are: map[]\\\\nI1213 06:30:29.506875 6780 ovn.go:134] Ensuring zone local for Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf in node crc\\\\nI1213 06:30:29.506881 6780 obj_retry.go:386]\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hfgcf_openshift-ovn-kubernetes(caf986e7-b521-40fd-ae26-18716730d57d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:41Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.625599 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c880cc39f16d08a56ae903058580b30b83005a94239bc86b63ce2827d3e3338e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://63a4b5bf19a626d27b5ff5bbe0faac497702e18b0003790f0165d788dcc3326b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gdm7w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:41Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.637208 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:41Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.649099 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:41Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.662428 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29eacc23b1b0315e6101fa8981ce61752594e1488b26a1dfba5310a6893d0a9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-13T06:30:40Z\\\",\\\"message\\\":\\\"2025-12-13T06:29:55+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_b318d8f2-ad45-42da-a7a4-494c1a62c42d\\\\n2025-12-13T06:29:55+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_b318d8f2-ad45-42da-a7a4-494c1a62c42d to /host/opt/cni/bin/\\\\n2025-12-13T06:29:55Z [verbose] multus-daemon started\\\\n2025-12-13T06:29:55Z [verbose] Readiness Indicator file check\\\\n2025-12-13T06:30:40Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:41Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.671126 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.671189 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.671205 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.671225 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.671239 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:41Z","lastTransitionTime":"2025-12-13T06:30:41Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.681925 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\"
:true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":fal
se,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:41Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.695976 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:41Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.709692 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8d297a9-d6df-4f4c-bb78-7eff45697942\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bdca655a7c7ffd64ba59fb2ba574a7fef8ff566bcf1c75776b7cd93d734b694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6f921bd85ec5e159d59b8ee9e98026784f7ed189beea02e9f76fced30361160\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0db465510d0e1d5bf6f917f72d4b32608ab044e44bcf883bcfd9634d2d18ae4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719efa03a574deadefcdc3926b9aa9bf0d6f0f2ec72357b3cde3d17bf94eacd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://719efa03a574deadefcdc3926b9aa9bf0d6f0f2ec72357b3cde3d17bf94eacd0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:41Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.720823 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:41Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.732736 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:41Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.745845 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db7dc4c24b2f287638a86fcc16432cadbd96b03b50a7cb88b8b59a178a89001a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:41Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.755082 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tm62z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"226b24e2-92c6-43d1-a621-09702ffa8fd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tm62z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:41Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.774056 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.774109 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.774120 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.774134 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.774143 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:41Z","lastTransitionTime":"2025-12-13T06:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.877032 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.877093 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.877103 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.877119 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.877130 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:41Z","lastTransitionTime":"2025-12-13T06:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.979495 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.979574 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.979586 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.979605 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:41 crc kubenswrapper[5048]: I1213 06:30:41.979635 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:41Z","lastTransitionTime":"2025-12-13T06:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.082303 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.082343 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.082354 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.082371 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.082382 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:42Z","lastTransitionTime":"2025-12-13T06:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.184799 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.184856 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.184872 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.184893 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.184910 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:42Z","lastTransitionTime":"2025-12-13T06:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.288026 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.288100 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.288111 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.288124 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.288132 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:42Z","lastTransitionTime":"2025-12-13T06:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.391994 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.392036 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.392045 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.392060 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.392068 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:42Z","lastTransitionTime":"2025-12-13T06:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.494864 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.494916 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.494930 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.494951 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.494964 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:42Z","lastTransitionTime":"2025-12-13T06:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.566788 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.566890 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.566922 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:30:42 crc kubenswrapper[5048]: E1213 06:30:42.567063 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:30:42 crc kubenswrapper[5048]: E1213 06:30:42.567736 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:30:42 crc kubenswrapper[5048]: E1213 06:30:42.567956 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.569069 5048 scope.go:117] "RemoveContainer" containerID="1018805f5a3b3d4a892b9ec4836e2bcd7d56f8a089e1fa2767de4751fd554aca" Dec 13 06:30:42 crc kubenswrapper[5048]: E1213 06:30:42.569484 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-hfgcf_openshift-ovn-kubernetes(caf986e7-b521-40fd-ae26-18716730d57d)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" podUID="caf986e7-b521-40fd-ae26-18716730d57d" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.597039 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.597074 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.597083 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.597096 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.597193 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:42Z","lastTransitionTime":"2025-12-13T06:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.700848 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.700967 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.701005 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.701039 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.701061 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:42Z","lastTransitionTime":"2025-12-13T06:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.804683 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.804761 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.804787 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.804824 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.804852 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:42Z","lastTransitionTime":"2025-12-13T06:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.908501 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.908570 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.908583 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.908609 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:42 crc kubenswrapper[5048]: I1213 06:30:42.908623 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:42Z","lastTransitionTime":"2025-12-13T06:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.012263 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.012340 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.012351 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.012368 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.012379 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:43Z","lastTransitionTime":"2025-12-13T06:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.116163 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.116260 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.116280 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.116307 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.116324 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:43Z","lastTransitionTime":"2025-12-13T06:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.219813 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.219889 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.219899 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.219920 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.219932 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:43Z","lastTransitionTime":"2025-12-13T06:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.322734 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.322814 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.322829 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.322846 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.322859 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:43Z","lastTransitionTime":"2025-12-13T06:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.426205 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.426295 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.426306 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.426320 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.426330 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:43Z","lastTransitionTime":"2025-12-13T06:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.528568 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.528629 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.528638 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.528661 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.528673 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:43Z","lastTransitionTime":"2025-12-13T06:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.565969 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:30:43 crc kubenswrapper[5048]: E1213 06:30:43.566155 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.631199 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.631238 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.631251 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.631269 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.631287 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:43Z","lastTransitionTime":"2025-12-13T06:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.733231 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.733287 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.733311 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.733369 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.733387 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:43Z","lastTransitionTime":"2025-12-13T06:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.835396 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.835478 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.835495 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.835519 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.835535 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:43Z","lastTransitionTime":"2025-12-13T06:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.938352 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.938410 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.938426 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.938477 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:43 crc kubenswrapper[5048]: I1213 06:30:43.938498 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:43Z","lastTransitionTime":"2025-12-13T06:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.041193 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.041267 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.041289 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.041316 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.041345 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:44Z","lastTransitionTime":"2025-12-13T06:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.144023 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.144060 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.144071 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.144087 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.144097 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:44Z","lastTransitionTime":"2025-12-13T06:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.246804 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.246846 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.246857 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.246875 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.246884 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:44Z","lastTransitionTime":"2025-12-13T06:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.350195 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.350257 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.350268 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.350286 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.350296 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:44Z","lastTransitionTime":"2025-12-13T06:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.453191 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.453239 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.453248 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.453264 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.453274 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:44Z","lastTransitionTime":"2025-12-13T06:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.557179 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.557245 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.557254 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.557276 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.557288 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:44Z","lastTransitionTime":"2025-12-13T06:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.566424 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.566518 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:30:44 crc kubenswrapper[5048]: E1213 06:30:44.566617 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.566675 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:30:44 crc kubenswrapper[5048]: E1213 06:30:44.566835 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:30:44 crc kubenswrapper[5048]: E1213 06:30:44.566938 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.660017 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.660108 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.660129 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.660153 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.660172 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:44Z","lastTransitionTime":"2025-12-13T06:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.763860 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.763913 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.763930 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.763953 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.763971 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:44Z","lastTransitionTime":"2025-12-13T06:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.867147 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.867227 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.867241 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.867262 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.867273 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:44Z","lastTransitionTime":"2025-12-13T06:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.970297 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.970372 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.970386 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.970416 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:44 crc kubenswrapper[5048]: I1213 06:30:44.970475 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:44Z","lastTransitionTime":"2025-12-13T06:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.074733 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.074802 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.074816 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.074846 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.074886 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:45Z","lastTransitionTime":"2025-12-13T06:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.178397 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.178480 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.178492 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.178539 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.178559 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:45Z","lastTransitionTime":"2025-12-13T06:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.282081 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.282137 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.282147 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.282165 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.282175 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:45Z","lastTransitionTime":"2025-12-13T06:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.385326 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.385355 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.385363 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.385376 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.385385 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:45Z","lastTransitionTime":"2025-12-13T06:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.487524 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.487566 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.487581 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.487597 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.487610 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:45Z","lastTransitionTime":"2025-12-13T06:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.566521 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:30:45 crc kubenswrapper[5048]: E1213 06:30:45.566738 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.590512 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.590559 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.590568 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.590584 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.590594 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:45Z","lastTransitionTime":"2025-12-13T06:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.694319 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.694376 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.694387 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.694411 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.694424 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:45Z","lastTransitionTime":"2025-12-13T06:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.797809 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.797851 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.797859 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.797872 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.797882 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:45Z","lastTransitionTime":"2025-12-13T06:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.885232 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.885307 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.885326 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.885345 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.885357 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:45Z","lastTransitionTime":"2025-12-13T06:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:45 crc kubenswrapper[5048]: E1213 06:30:45.898734 5048 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6aaa1fdd-aec3-41cf-bed2-ae7f1a625255\\\",\\\"systemUUID\\\":\\\"c40dcd55-9053-46a7-9f70-890f2d5d7520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:45Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.904337 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.904408 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.904421 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.904465 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.904479 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:45Z","lastTransitionTime":"2025-12-13T06:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:45 crc kubenswrapper[5048]: E1213 06:30:45.917923 5048 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6aaa1fdd-aec3-41cf-bed2-ae7f1a625255\\\",\\\"systemUUID\\\":\\\"c40dcd55-9053-46a7-9f70-890f2d5d7520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:45Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.922619 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.922664 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.922681 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.922703 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.922718 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:45Z","lastTransitionTime":"2025-12-13T06:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:45 crc kubenswrapper[5048]: E1213 06:30:45.937126 5048 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6aaa1fdd-aec3-41cf-bed2-ae7f1a625255\\\",\\\"systemUUID\\\":\\\"c40dcd55-9053-46a7-9f70-890f2d5d7520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:45Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.942422 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.942500 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.942510 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.942545 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.942561 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:45Z","lastTransitionTime":"2025-12-13T06:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:45 crc kubenswrapper[5048]: E1213 06:30:45.959600 5048 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6aaa1fdd-aec3-41cf-bed2-ae7f1a625255\\\",\\\"systemUUID\\\":\\\"c40dcd55-9053-46a7-9f70-890f2d5d7520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:45Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.964701 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.964772 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.964783 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.964810 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.964832 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:45Z","lastTransitionTime":"2025-12-13T06:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:45 crc kubenswrapper[5048]: E1213 06:30:45.980098 5048 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6aaa1fdd-aec3-41cf-bed2-ae7f1a625255\\\",\\\"systemUUID\\\":\\\"c40dcd55-9053-46a7-9f70-890f2d5d7520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:45Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:45 crc kubenswrapper[5048]: E1213 06:30:45.980279 5048 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.982224 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.982277 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.982289 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.982310 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:45 crc kubenswrapper[5048]: I1213 06:30:45.982325 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:45Z","lastTransitionTime":"2025-12-13T06:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.085031 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.085081 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.085093 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.085110 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.085122 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:46Z","lastTransitionTime":"2025-12-13T06:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.188309 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.188356 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.188364 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.188378 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.188387 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:46Z","lastTransitionTime":"2025-12-13T06:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.291698 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.291747 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.291756 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.291769 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.291778 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:46Z","lastTransitionTime":"2025-12-13T06:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.394791 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.394851 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.394859 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.394873 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.394882 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:46Z","lastTransitionTime":"2025-12-13T06:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.496822 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.496879 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.496890 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.496907 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.496919 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:46Z","lastTransitionTime":"2025-12-13T06:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.566724 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:30:46 crc kubenswrapper[5048]: E1213 06:30:46.566881 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.567311 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:30:46 crc kubenswrapper[5048]: E1213 06:30:46.567402 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.567664 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:30:46 crc kubenswrapper[5048]: E1213 06:30:46.567833 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.584517 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:46Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.595914 5048 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-dns/node-resolver-kkfct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:46Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.600301 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.600348 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.600357 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.600374 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.600383 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:46Z","lastTransitionTime":"2025-12-13T06:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.608877 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:46Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.638924 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1018805f5a3b3d4a892b9ec4836e2bcd7d56f8a089e1fa2767de4751fd554aca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1018805f5a3b3d4a892b9ec4836e2bcd7d56f8a089e1fa2767de4751fd554aca\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-13T06:30:29Z\\\",\\\"message\\\":\\\"06:30:29.506839 6780 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc after 0 failed attempt(s)\\\\nI1213 06:30:29.506847 6780 default_network_controller.go:776] Recording success event on pod openshift-kube-controller-manager/kube-controller-manager-crc\\\\nI1213 06:30:29.506799 6780 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-bdd78 in node crc\\\\nI1213 06:30:29.506858 6780 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-bdd78 after 0 failed attempt(s)\\\\nI1213 06:30:29.506859 6780 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nI1213 06:30:29.506862 6780 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-additional-cni-plugins-bdd78\\\\nI1213 06:30:29.506867 6780 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nI1213 06:30:29.506715 6780 lb_config.go:1031] Cluster endpoints for openshift-machine-api/machine-api-operator for network=default are: map[]\\\\nI1213 06:30:29.506875 6780 ovn.go:134] Ensuring zone local for Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf in node crc\\\\nI1213 06:30:29.506881 6780 obj_retry.go:386]\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hfgcf_openshift-ovn-kubernetes(caf986e7-b521-40fd-ae26-18716730d57d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:46Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.652349 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c880cc39f16d08a56ae903058580b30b83005a94239bc86b63ce2827d3e3338e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://63a4b5bf19a626d27b5ff5bbe0faac497702e18b0003790f0165d788dcc3326b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gdm7w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:46Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.667566 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:46Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.681963 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:46Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.695488 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:46Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.702044 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.702088 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.702099 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.702115 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.702126 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:46Z","lastTransitionTime":"2025-12-13T06:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.710652 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29eacc23b1b0315e6101fa8981ce61752594e1488b26a1dfba5310a6893d0a9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-13T06:30:40Z\\\",\\\"message\\\":\\\"2025-12-13T06:29:55+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_b318d8f2-ad45-42da-a7a4-494c1a62c42d\\\\n2025-12-13T06:29:55+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_b318d8f2-ad45-42da-a7a4-494c1a62c42d to /host/opt/cni/bin/\\\\n2025-12-13T06:29:55Z [verbose] multus-daemon started\\\\n2025-12-13T06:29:55Z [verbose] Readiness Indicator file check\\\\n2025-12-13T06:30:40Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:46Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.735663 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a
85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:46Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.751660 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:46Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.765934 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8d297a9-d6df-4f4c-bb78-7eff45697942\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bdca655a7c7ffd64ba59fb2ba574a7fef8ff566bcf1c75776b7cd93d734b694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6f921bd85ec5e159d59b8ee9e98026784f7ed189beea02e9f76fced30361160\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0db465510d0e1d5bf6f917f72d4b32608ab044e44bcf883bcfd9634d2d18ae4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719efa03a574deadefcdc3926b9aa9bf0d6f0f2ec72357b3cde3d17bf94eacd0\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://719efa03a574deadefcdc3926b9aa9bf0d6f0f2ec72357b3cde3d17bf94eacd0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:46Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.779618 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:46Z is after 2025-08-24T17:21:41Z" Dec 13 
06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.794490 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db7dc4c24b2f287638a86fcc16432cadbd96b03b50a7cb88b8b59a178a89001a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"image\\\":\\\"quay.io/openshift-rele
ase-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:46Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.805707 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 
06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.805745 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.805754 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.805768 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.805779 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:46Z","lastTransitionTime":"2025-12-13T06:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.807644 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tm62z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"226b24e2-92c6-43d1-a621-09702ffa8fd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tm62z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:46Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.821897 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:46Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.834271 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"600ac0db-9d2a-49c8-b58b-78f6f3ed60e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3f8010716141c431c87894a8a22182ae06fd0671ad4bd6988888756f027a240d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2144a7f51ff3f41cfe760f2939d1b2d31ef9c9e4863fe8b26eaeda22a2f5ee23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2144a7f51ff3f41cfe760f2939d1b2d31ef9c9e4863fe8b26eaeda22a2f5ee23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:46Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.848410 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:46Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.861777 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:46Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.908417 5048 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.908475 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.908486 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.908503 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:46 crc kubenswrapper[5048]: I1213 06:30:46.908515 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:46Z","lastTransitionTime":"2025-12-13T06:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.011034 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.011101 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.011119 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.011143 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.011160 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:47Z","lastTransitionTime":"2025-12-13T06:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.113341 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.113388 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.113404 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.113420 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.113450 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:47Z","lastTransitionTime":"2025-12-13T06:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.227482 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.227535 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.227544 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.227558 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.227571 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:47Z","lastTransitionTime":"2025-12-13T06:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.335145 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.335209 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.335220 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.335237 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.335252 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:47Z","lastTransitionTime":"2025-12-13T06:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.438248 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.438324 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.438336 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.438360 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.438373 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:47Z","lastTransitionTime":"2025-12-13T06:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.540994 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.541070 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.541080 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.541099 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.541112 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:47Z","lastTransitionTime":"2025-12-13T06:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.566906 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:30:47 crc kubenswrapper[5048]: E1213 06:30:47.567206 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.643372 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.643415 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.643427 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.643460 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.643473 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:47Z","lastTransitionTime":"2025-12-13T06:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.745843 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.745895 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.745908 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.745922 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.745931 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:47Z","lastTransitionTime":"2025-12-13T06:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.848665 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.848704 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.848715 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.848731 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.848742 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:47Z","lastTransitionTime":"2025-12-13T06:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.951096 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.951126 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.951138 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.951160 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:47 crc kubenswrapper[5048]: I1213 06:30:47.951184 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:47Z","lastTransitionTime":"2025-12-13T06:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.053878 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.053933 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.053945 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.053960 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.053970 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:48Z","lastTransitionTime":"2025-12-13T06:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.156672 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.156721 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.156732 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.156748 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.156759 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:48Z","lastTransitionTime":"2025-12-13T06:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.260081 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.260119 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.260130 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.260145 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.260157 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:48Z","lastTransitionTime":"2025-12-13T06:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.362799 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.362840 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.362849 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.362861 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.362870 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:48Z","lastTransitionTime":"2025-12-13T06:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.464597 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.464647 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.464656 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.464670 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.464680 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:48Z","lastTransitionTime":"2025-12-13T06:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.565836 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:30:48 crc kubenswrapper[5048]: E1213 06:30:48.565943 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.565833 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.565987 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:30:48 crc kubenswrapper[5048]: E1213 06:30:48.566024 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:30:48 crc kubenswrapper[5048]: E1213 06:30:48.566068 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.566549 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.566589 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.566605 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.566617 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.566626 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:48Z","lastTransitionTime":"2025-12-13T06:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.669680 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.669711 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.669719 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.669733 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.669741 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:48Z","lastTransitionTime":"2025-12-13T06:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.772835 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.772883 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.772897 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.772913 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.772924 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:48Z","lastTransitionTime":"2025-12-13T06:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.876533 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.876589 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.876600 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.876619 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.876632 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:48Z","lastTransitionTime":"2025-12-13T06:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.979657 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.979713 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.979724 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.979747 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:48 crc kubenswrapper[5048]: I1213 06:30:48.979758 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:48Z","lastTransitionTime":"2025-12-13T06:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.082538 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.082604 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.082617 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.082636 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.082648 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:49Z","lastTransitionTime":"2025-12-13T06:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.185402 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.185454 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.185465 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.185478 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.185491 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:49Z","lastTransitionTime":"2025-12-13T06:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.288015 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.288050 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.288058 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.288071 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.288083 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:49Z","lastTransitionTime":"2025-12-13T06:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.390556 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.390693 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.390710 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.390724 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.390733 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:49Z","lastTransitionTime":"2025-12-13T06:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.493733 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.493793 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.493806 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.493825 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.493839 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:49Z","lastTransitionTime":"2025-12-13T06:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.565793 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:30:49 crc kubenswrapper[5048]: E1213 06:30:49.566001 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.596634 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.596695 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.596708 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.596728 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.596740 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:49Z","lastTransitionTime":"2025-12-13T06:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.699659 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.699703 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.699711 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.699723 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.699732 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:49Z","lastTransitionTime":"2025-12-13T06:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.803129 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.803200 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.803212 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.803231 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.803244 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:49Z","lastTransitionTime":"2025-12-13T06:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.906018 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.906088 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.906099 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.906115 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:49 crc kubenswrapper[5048]: I1213 06:30:49.906125 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:49Z","lastTransitionTime":"2025-12-13T06:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.009375 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.009470 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.009484 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.009506 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.009522 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:50Z","lastTransitionTime":"2025-12-13T06:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.112558 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.112623 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.112637 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.112663 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.112678 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:50Z","lastTransitionTime":"2025-12-13T06:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.215491 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.215564 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.215576 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.215594 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.215606 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:50Z","lastTransitionTime":"2025-12-13T06:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.317797 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.317841 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.317851 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.317867 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.317876 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:50Z","lastTransitionTime":"2025-12-13T06:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.420144 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.420190 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.420202 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.420219 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.420231 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:50Z","lastTransitionTime":"2025-12-13T06:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.523986 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.524029 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.524040 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.524056 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.524065 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:50Z","lastTransitionTime":"2025-12-13T06:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.566194 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.566280 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.566245 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:30:50 crc kubenswrapper[5048]: E1213 06:30:50.566397 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:30:50 crc kubenswrapper[5048]: E1213 06:30:50.566663 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:30:50 crc kubenswrapper[5048]: E1213 06:30:50.566718 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.627024 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.627085 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.627100 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.627126 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.627141 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:50Z","lastTransitionTime":"2025-12-13T06:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.730619 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.730698 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.730709 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.730730 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.730742 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:50Z","lastTransitionTime":"2025-12-13T06:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.834199 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.834266 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.834278 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.834294 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.834304 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:50Z","lastTransitionTime":"2025-12-13T06:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.937997 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.938071 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.938084 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.938108 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:50 crc kubenswrapper[5048]: I1213 06:30:50.938123 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:50Z","lastTransitionTime":"2025-12-13T06:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.041686 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.041755 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.041766 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.041806 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.041817 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:51Z","lastTransitionTime":"2025-12-13T06:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.143891 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.143941 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.143955 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.143973 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.143986 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:51Z","lastTransitionTime":"2025-12-13T06:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.247814 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.247864 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.247874 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.247890 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.247899 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:51Z","lastTransitionTime":"2025-12-13T06:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.350407 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.350499 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.350516 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.350534 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.350550 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:51Z","lastTransitionTime":"2025-12-13T06:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.453233 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.453293 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.453304 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.453320 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.453333 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:51Z","lastTransitionTime":"2025-12-13T06:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.554998 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.555030 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.555038 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.555051 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.555061 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:51Z","lastTransitionTime":"2025-12-13T06:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.566362 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:30:51 crc kubenswrapper[5048]: E1213 06:30:51.566603 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.657134 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.657177 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.657188 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.657207 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.657219 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:51Z","lastTransitionTime":"2025-12-13T06:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.759999 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.760040 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.760053 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.760070 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.760089 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:51Z","lastTransitionTime":"2025-12-13T06:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.863250 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.863316 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.863332 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.863355 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.863372 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:51Z","lastTransitionTime":"2025-12-13T06:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.892292 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:30:51 crc kubenswrapper[5048]: E1213 06:30:51.892558 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:31:55.892524254 +0000 UTC m=+149.759118835 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.892701 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:30:51 crc kubenswrapper[5048]: E1213 06:30:51.892849 5048 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 13 06:30:51 crc kubenswrapper[5048]: E1213 06:30:51.892980 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-13 06:31:55.892954486 +0000 UTC m=+149.759549097 (durationBeforeRetry 1m4s). 
Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.994080 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.994132 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 13 06:30:51 crc kubenswrapper[5048]: I1213 06:30:51.994175 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 13 06:30:51 crc kubenswrapper[5048]: E1213 06:30:51.994292 5048 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Dec 13 06:30:51 crc kubenswrapper[5048]: E1213 06:30:51.994347 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-13 06:31:55.994331115 +0000 UTC m=+149.860925696 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Dec 13 06:30:51 crc kubenswrapper[5048]: E1213 06:30:51.994572 5048 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Dec 13 06:30:51 crc kubenswrapper[5048]: E1213 06:30:51.994589 5048 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Dec 13 06:30:51 crc kubenswrapper[5048]: E1213 06:30:51.994601 5048 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Dec 13 06:30:51 crc kubenswrapper[5048]: E1213 06:30:51.994625 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-13 06:31:55.994617543 +0000 UTC m=+149.861212124 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Dec 13 06:30:51 crc kubenswrapper[5048]: E1213 06:30:51.995074 5048 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Dec 13 06:30:51 crc kubenswrapper[5048]: E1213 06:30:51.995156 5048 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Dec 13 06:30:51 crc kubenswrapper[5048]: E1213 06:30:51.995174 5048 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Dec 13 06:30:51 crc kubenswrapper[5048]: E1213 06:30:51.995260 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-13 06:31:55.99522941 +0000 UTC m=+149.861824111 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Dec 13 06:30:52 crc kubenswrapper[5048]: I1213 06:30:52.566937 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 13 06:30:52 crc kubenswrapper[5048]: I1213 06:30:52.566959 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 13 06:30:52 crc kubenswrapper[5048]: I1213 06:30:52.567083 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 13 06:30:52 crc kubenswrapper[5048]: E1213 06:30:52.567242 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 13 06:30:52 crc kubenswrapper[5048]: E1213 06:30:52.567371 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 13 06:30:52 crc kubenswrapper[5048]: E1213 06:30:52.567418 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Has your network provider started?"} Dec 13 06:30:52 crc kubenswrapper[5048]: I1213 06:30:52.689867 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:52 crc kubenswrapper[5048]: I1213 06:30:52.689917 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:52 crc kubenswrapper[5048]: I1213 06:30:52.689931 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:52 crc kubenswrapper[5048]: I1213 06:30:52.689953 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:52 crc kubenswrapper[5048]: I1213 06:30:52.689968 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:52Z","lastTransitionTime":"2025-12-13T06:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:52 crc kubenswrapper[5048]: I1213 06:30:52.793759 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:52 crc kubenswrapper[5048]: I1213 06:30:52.794137 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:52 crc kubenswrapper[5048]: I1213 06:30:52.794146 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:52 crc kubenswrapper[5048]: I1213 06:30:52.794160 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:52 crc kubenswrapper[5048]: I1213 06:30:52.794172 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:52Z","lastTransitionTime":"2025-12-13T06:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:52 crc kubenswrapper[5048]: I1213 06:30:52.896697 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:52 crc kubenswrapper[5048]: I1213 06:30:52.896730 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:52 crc kubenswrapper[5048]: I1213 06:30:52.896740 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:52 crc kubenswrapper[5048]: I1213 06:30:52.896753 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:52 crc kubenswrapper[5048]: I1213 06:30:52.896762 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:52Z","lastTransitionTime":"2025-12-13T06:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:52 crc kubenswrapper[5048]: I1213 06:30:52.998806 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:52 crc kubenswrapper[5048]: I1213 06:30:52.998851 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:52 crc kubenswrapper[5048]: I1213 06:30:52.998860 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:52 crc kubenswrapper[5048]: I1213 06:30:52.998873 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:52 crc kubenswrapper[5048]: I1213 06:30:52.998882 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:52Z","lastTransitionTime":"2025-12-13T06:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.103214 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.103275 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.103298 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.103329 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.103352 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:53Z","lastTransitionTime":"2025-12-13T06:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.206130 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.206171 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.206182 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.206202 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.206214 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:53Z","lastTransitionTime":"2025-12-13T06:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.309019 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.309067 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.309076 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.309090 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.309100 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:53Z","lastTransitionTime":"2025-12-13T06:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.411528 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.411574 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.411592 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.411612 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.411625 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:53Z","lastTransitionTime":"2025-12-13T06:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.515707 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.515779 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.515799 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.515824 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.515843 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:53Z","lastTransitionTime":"2025-12-13T06:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.565814 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:30:53 crc kubenswrapper[5048]: E1213 06:30:53.565991 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.618553 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.618589 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.618599 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.618612 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.618621 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:53Z","lastTransitionTime":"2025-12-13T06:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.721019 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.721062 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.721074 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.721089 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.721101 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:53Z","lastTransitionTime":"2025-12-13T06:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.823894 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.823943 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.823957 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.823971 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.823982 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:53Z","lastTransitionTime":"2025-12-13T06:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.926331 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.926379 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.926390 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.926405 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:53 crc kubenswrapper[5048]: I1213 06:30:53.926419 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:53Z","lastTransitionTime":"2025-12-13T06:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.028753 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.028798 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.028808 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.028824 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.028874 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:54Z","lastTransitionTime":"2025-12-13T06:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.131609 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.131640 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.131649 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.131665 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.131675 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:54Z","lastTransitionTime":"2025-12-13T06:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.235112 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.235189 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.235200 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.235219 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.235231 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:54Z","lastTransitionTime":"2025-12-13T06:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.338404 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.338481 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.338493 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.338509 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.338519 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:54Z","lastTransitionTime":"2025-12-13T06:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.441734 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.441846 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.441858 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.441877 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.441890 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:54Z","lastTransitionTime":"2025-12-13T06:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.544940 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.545005 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.545023 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.545047 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.545062 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:54Z","lastTransitionTime":"2025-12-13T06:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.566262 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.566358 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.566399 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:30:54 crc kubenswrapper[5048]: E1213 06:30:54.566546 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:30:54 crc kubenswrapper[5048]: E1213 06:30:54.567098 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:30:54 crc kubenswrapper[5048]: E1213 06:30:54.567202 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.647545 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.647585 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.647596 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.647613 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.647624 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:54Z","lastTransitionTime":"2025-12-13T06:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.750529 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.750576 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.750588 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.750605 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.750616 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:54Z","lastTransitionTime":"2025-12-13T06:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.852639 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.852683 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.852692 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.852704 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.852714 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:54Z","lastTransitionTime":"2025-12-13T06:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.955293 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.955368 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.955389 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.955407 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:54 crc kubenswrapper[5048]: I1213 06:30:54.955422 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:54Z","lastTransitionTime":"2025-12-13T06:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.058642 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.058687 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.058700 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.058719 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.058732 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:55Z","lastTransitionTime":"2025-12-13T06:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.161888 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.161925 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.161937 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.161951 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.161970 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:55Z","lastTransitionTime":"2025-12-13T06:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.264784 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.264852 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.264869 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.264892 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.264910 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:55Z","lastTransitionTime":"2025-12-13T06:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.368822 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.368883 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.368894 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.368915 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.368933 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:55Z","lastTransitionTime":"2025-12-13T06:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.472362 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.472417 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.472456 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.472477 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.472492 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:55Z","lastTransitionTime":"2025-12-13T06:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.566334 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:30:55 crc kubenswrapper[5048]: E1213 06:30:55.566516 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.577672 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.577723 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.577734 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.577751 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.577762 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:55Z","lastTransitionTime":"2025-12-13T06:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.680457 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.680506 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.680525 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.680541 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.680554 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:55Z","lastTransitionTime":"2025-12-13T06:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.783406 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.783486 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.783504 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.783520 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.783531 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:55Z","lastTransitionTime":"2025-12-13T06:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.886195 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.886244 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.886256 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.886275 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.886289 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:55Z","lastTransitionTime":"2025-12-13T06:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.988907 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.988970 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.988982 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.989008 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:55 crc kubenswrapper[5048]: I1213 06:30:55.989022 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:55Z","lastTransitionTime":"2025-12-13T06:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.041981 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.042032 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.042052 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.042072 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.042085 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:56Z","lastTransitionTime":"2025-12-13T06:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:56 crc kubenswrapper[5048]: E1213 06:30:56.056122 5048 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6aaa1fdd-aec3-41cf-bed2-ae7f1a625255\\\",\\\"systemUUID\\\":\\\"c40dcd55-9053-46a7-9f70-890f2d5d7520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.060419 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.060486 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.060495 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.060511 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.060522 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:56Z","lastTransitionTime":"2025-12-13T06:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:56 crc kubenswrapper[5048]: E1213 06:30:56.072461 5048 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6aaa1fdd-aec3-41cf-bed2-ae7f1a625255\\\",\\\"systemUUID\\\":\\\"c40dcd55-9053-46a7-9f70-890f2d5d7520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.075987 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.076050 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.076065 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.076083 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.076097 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:56Z","lastTransitionTime":"2025-12-13T06:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:56 crc kubenswrapper[5048]: E1213 06:30:56.087505 5048 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6aaa1fdd-aec3-41cf-bed2-ae7f1a625255\\\",\\\"systemUUID\\\":\\\"c40dcd55-9053-46a7-9f70-890f2d5d7520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.091567 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.091631 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.091641 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.091656 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.091667 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:56Z","lastTransitionTime":"2025-12-13T06:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:56 crc kubenswrapper[5048]: E1213 06:30:56.102848 5048 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6aaa1fdd-aec3-41cf-bed2-ae7f1a625255\\\",\\\"systemUUID\\\":\\\"c40dcd55-9053-46a7-9f70-890f2d5d7520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.106401 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.106453 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.106464 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.106477 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.106486 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:56Z","lastTransitionTime":"2025-12-13T06:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:56 crc kubenswrapper[5048]: E1213 06:30:56.117620 5048 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6aaa1fdd-aec3-41cf-bed2-ae7f1a625255\\\",\\\"systemUUID\\\":\\\"c40dcd55-9053-46a7-9f70-890f2d5d7520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:56Z is after 2025-08-24T17:21:41Z"
Dec 13 06:30:56 crc kubenswrapper[5048]: E1213 06:30:56.117790 5048 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.119297 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc"
event="NodeHasSufficientMemory" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.119331 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.119342 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.119358 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.119370 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:56Z","lastTransitionTime":"2025-12-13T06:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.222673 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.222755 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.222771 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.222793 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.222812 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:56Z","lastTransitionTime":"2025-12-13T06:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.325798 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.325834 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.325844 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.325858 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.325868 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:56Z","lastTransitionTime":"2025-12-13T06:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.428389 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.428424 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.428462 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.428476 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.428486 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:56Z","lastTransitionTime":"2025-12-13T06:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.531412 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.531477 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.531486 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.531501 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.531510 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:56Z","lastTransitionTime":"2025-12-13T06:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.566168 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.566241 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:30:56 crc kubenswrapper[5048]: E1213 06:30:56.566514 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.566544 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 13 06:30:56 crc kubenswrapper[5048]: E1213 06:30:56.566736 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 13 06:30:56 crc kubenswrapper[5048]: E1213 06:30:56.566988 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.567779 5048 scope.go:117] "RemoveContainer" containerID="1018805f5a3b3d4a892b9ec4836e2bcd7d56f8a089e1fa2767de4751fd554aca"
Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.579065 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:56Z is after 2025-08-24T17:21:41Z"
Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.598953 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1018805f5a3b3d4a892b9ec4836e2bcd7d56f8a089e1fa2767de4751fd554aca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1018805f5a3b3d4a892b9ec4836e2bcd7d56f8a089e1fa2767de4751fd554aca\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-13T06:30:29Z\\\",\\\"message\\\":\\\"06:30:29.506839 6780 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc after 0 failed attempt(s)\\\\nI1213 06:30:29.506847 6780 default_network_controller.go:776] Recording success event on pod openshift-kube-controller-manager/kube-controller-manager-crc\\\\nI1213 06:30:29.506799 6780 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-bdd78 in node crc\\\\nI1213 06:30:29.506858 6780 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-bdd78 after 0 failed attempt(s)\\\\nI1213 06:30:29.506859 6780 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nI1213 06:30:29.506862 6780 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-additional-cni-plugins-bdd78\\\\nI1213 06:30:29.506867 6780 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nI1213 06:30:29.506715 6780 lb_config.go:1031] Cluster endpoints for openshift-machine-api/machine-api-operator for network=default are: map[]\\\\nI1213 06:30:29.506875 6780 ovn.go:134] Ensuring zone local for Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf in node crc\\\\nI1213 06:30:29.506881 6780 
obj_retry.go:386]\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-hfgcf_openshift-ovn-kubernetes(caf986e7-b521-40fd-ae26-18716730d57d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true
,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.612031 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c880cc39f16d08a56ae903058580b30b83005a94239bc86b63ce2827d3e3338e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://63a4b5bf19a626d27b5ff5bbe0faac497702e18b0003790f0165d788dcc3326b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gdm7w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:56Z is after 2025-08-24T17:21:41Z" Dec 13 
06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.625087 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.634666 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.634696 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.634706 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.634719 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.634728 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:56Z","lastTransitionTime":"2025-12-13T06:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.641007 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.655366 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29eacc23b1b0315e6101fa8981ce61752594e1488b26a1dfba5310a6893d0a9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-13T06:30:40Z\\\",\\\"message\\\":\\\"2025-12-13T06:29:55+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_b318d8f2-ad45-42da-a7a4-494c1a62c42d\\\\n2025-12-13T06:29:55+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_b318d8f2-ad45-42da-a7a4-494c1a62c42d to /host/opt/cni/bin/\\\\n2025-12-13T06:29:55Z [verbose] multus-daemon started\\\\n2025-12-13T06:29:55Z [verbose] Readiness Indicator file check\\\\n2025-12-13T06:30:40Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.675886 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a
85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.688592 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.699908 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8d297a9-d6df-4f4c-bb78-7eff45697942\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bdca655a7c7ffd64ba59fb2ba574a7fef8ff566bcf1c75776b7cd93d734b694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6f921bd85ec5e159d59b8ee9e98026784f7ed189beea02e9f76fced30361160\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0db465510d0e1d5bf6f917f72d4b32608ab044e44bcf883bcfd9634d2d18ae4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719efa03a574deadefcdc3926b9aa9bf0d6f0f2ec72357b3cde3d17bf94eacd0\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://719efa03a574deadefcdc3926b9aa9bf0d6f0f2ec72357b3cde3d17bf94eacd0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.713616 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:56Z is after 2025-08-24T17:21:41Z" Dec 13 
06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.727226 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.737844 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.737889 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.737902 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.737919 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.737931 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:56Z","lastTransitionTime":"2025-12-13T06:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.741235 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db7dc4c24b2f287638a86fcc16432cadbd96b03b50a7cb88b8b59a178a89001a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.753206 5048 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/network-metrics-daemon-tm62z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"226b24e2-92c6-43d1-a621-09702ffa8fd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tm62z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.765903 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.775381 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"600ac0db-9d2a-49c8-b58b-78f6f3ed60e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3f8010716141c431c87894a8a22182ae06fd0671ad4bd6988888756f027a240d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2144a7f51ff3f41cfe760f2939d1b2d31ef9c9e4863fe8b26eaeda22a2f5ee23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2144a7f51ff3f41cfe760f2939d1b2d31ef9c9e4863fe8b26eaeda22a2f5ee23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.785759 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.797100 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.809382 5048 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.818736 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:56Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.840515 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.840550 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.840559 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.840574 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.840582 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:56Z","lastTransitionTime":"2025-12-13T06:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.948366 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.948407 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.948423 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.948456 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:56 crc kubenswrapper[5048]: I1213 06:30:56.948469 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:56Z","lastTransitionTime":"2025-12-13T06:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.054424 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.054490 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.054499 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.054511 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.054521 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:57Z","lastTransitionTime":"2025-12-13T06:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.157529 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.157946 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.158163 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.158409 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.158675 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:57Z","lastTransitionTime":"2025-12-13T06:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.262145 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.262199 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.262218 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.262241 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.262259 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:57Z","lastTransitionTime":"2025-12-13T06:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.365415 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.365564 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.365606 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.365645 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.365664 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:57Z","lastTransitionTime":"2025-12-13T06:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.469935 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.470001 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.470014 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.470036 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.470048 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:57Z","lastTransitionTime":"2025-12-13T06:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.562898 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hfgcf_caf986e7-b521-40fd-ae26-18716730d57d/ovnkube-controller/2.log" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.567112 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" event={"ID":"caf986e7-b521-40fd-ae26-18716730d57d","Type":"ContainerStarted","Data":"4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2"} Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.570644 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.571254 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:30:57 crc kubenswrapper[5048]: E1213 06:30:57.571398 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.573606 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.573627 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.573637 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.573649 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.573659 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:57Z","lastTransitionTime":"2025-12-13T06:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.582917 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"600ac0db-9d2a-49c8-b58b-78f6f3ed60e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3f8010716141c431c87894a8a22182ae06fd0671ad4bd6988888756f027a240d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2144a7f51ff3f41cfe760f2939d1b2d31ef9c9e4863fe8b26eaeda22a2f5ee23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/o
penshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2144a7f51ff3f41cfe760f2939d1b2d31ef9c9e4863fe8b26eaeda22a2f5ee23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:57Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.595945 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:57Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.608183 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:57Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.622497 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6
355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 
06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:57Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.633401 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:57Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.648862 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:57Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.673194 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1018805f5a3b3d4a892b9ec4836e2bcd7d56f8a089e1fa2767de4751fd554aca\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-13T06:30:29Z\\\",\\\"message\\\":\\\"06:30:29.506839 6780 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc after 0 failed attempt(s)\\\\nI1213 06:30:29.506847 6780 default_network_controller.go:776] Recording success event on pod openshift-kube-controller-manager/kube-controller-manager-crc\\\\nI1213 06:30:29.506799 6780 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-bdd78 in node crc\\\\nI1213 06:30:29.506858 6780 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-bdd78 after 0 failed attempt(s)\\\\nI1213 06:30:29.506859 6780 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nI1213 06:30:29.506862 6780 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-additional-cni-plugins-bdd78\\\\nI1213 06:30:29.506867 6780 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nI1213 06:30:29.506715 6780 lb_config.go:1031] Cluster endpoints for openshift-machine-api/machine-api-operator for network=default are: map[]\\\\nI1213 06:30:29.506875 6780 ovn.go:134] Ensuring zone local for Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf in node crc\\\\nI1213 06:30:29.506881 6780 
obj_retry.go:386]\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuse
s\\\":[{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:57Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.676781 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.676819 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.676831 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.676847 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.676858 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:57Z","lastTransitionTime":"2025-12-13T06:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.688173 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c880cc39f16d08a56ae903058580b30b83005a94239bc86b63ce2827d3e3338e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://63a4b5bf19a626d27b5ff5bbe0faac497702e18b0003790f0165d788dcc3326b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gdm7w\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:57Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.704360 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:57Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.717987 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:57Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.730395 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8d297a9-d6df-4f4c-bb78-7eff45697942\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bdca655a7c7ffd64ba59fb2ba574a7fef8ff566bcf1c75776b7cd93d734b694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6f921bd85ec5e159d59b8ee9e98026784f7ed189beea02e9f76fced30361160\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0db465510d0e1d5bf6f917f72d4b32608ab044e44bcf883bcfd9634d2d18ae4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719efa03a574deadefcdc3926b9aa9bf0d6f0f2ec72357b3cde3d17bf94eacd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://719efa03a574deadefcdc3926b9aa9bf0d6f0f2ec72357b3cde3d17bf94eacd0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:57Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.742932 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:57Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.760804 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:57Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.779616 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.779671 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.779682 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.779696 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.779709 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:57Z","lastTransitionTime":"2025-12-13T06:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.780420 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:57Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.801273 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:57Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.819053 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29eacc23b1b0315e6101fa8981ce61752594e1488b26a1dfba5310a6893d0a9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-13T06:30:40Z\\\",\\\"message\\\":\\\"2025-12-13T06:29:55+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_b318d8f2-ad45-42da-a7a4-494c1a62c42d\\\\n2025-12-13T06:29:55+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_b318d8f2-ad45-42da-a7a4-494c1a62c42d to /host/opt/cni/bin/\\\\n2025-12-13T06:29:55Z [verbose] 
multus-daemon started\\\\n2025-12-13T06:29:55Z [verbose] Readiness Indicator file check\\\\n2025-12-13T06:30:40Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:57Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.839017 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a
85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:57Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.849963 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tm62z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"226b24e2-92c6-43d1-a621-09702ffa8fd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tm62z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:57Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.863514 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db7dc4c24b2f287638a86fcc16432cadbd96b03b50a7cb88b8b59a178a89001a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:57Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.882460 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.882488 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:57 crc 
kubenswrapper[5048]: I1213 06:30:57.882495 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.882508 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.882518 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:57Z","lastTransitionTime":"2025-12-13T06:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.984662 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.984704 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.984717 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.984732 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:57 crc kubenswrapper[5048]: I1213 06:30:57.984744 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:57Z","lastTransitionTime":"2025-12-13T06:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.087275 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.087318 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.087327 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.087341 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.087351 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:58Z","lastTransitionTime":"2025-12-13T06:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.190449 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.190491 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.190503 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.190516 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.190526 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:58Z","lastTransitionTime":"2025-12-13T06:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.292530 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.292574 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.292586 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.292602 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.292615 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:58Z","lastTransitionTime":"2025-12-13T06:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.394899 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.394935 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.394947 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.394960 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.394968 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:58Z","lastTransitionTime":"2025-12-13T06:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.497868 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.497951 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.497974 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.498004 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.498026 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:58Z","lastTransitionTime":"2025-12-13T06:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.566670 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:30:58 crc kubenswrapper[5048]: E1213 06:30:58.566812 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.566909 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.567120 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:30:58 crc kubenswrapper[5048]: E1213 06:30:58.567221 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:30:58 crc kubenswrapper[5048]: E1213 06:30:58.567333 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.571988 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hfgcf_caf986e7-b521-40fd-ae26-18716730d57d/ovnkube-controller/3.log" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.572621 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hfgcf_caf986e7-b521-40fd-ae26-18716730d57d/ovnkube-controller/2.log" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.575906 5048 generic.go:334] "Generic (PLEG): container finished" podID="caf986e7-b521-40fd-ae26-18716730d57d" containerID="4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2" exitCode=1 Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.575948 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" event={"ID":"caf986e7-b521-40fd-ae26-18716730d57d","Type":"ContainerDied","Data":"4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2"} Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.575981 5048 scope.go:117] "RemoveContainer" containerID="1018805f5a3b3d4a892b9ec4836e2bcd7d56f8a089e1fa2767de4751fd554aca" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.576934 5048 scope.go:117] "RemoveContainer" containerID="4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2" Dec 13 06:30:58 crc kubenswrapper[5048]: E1213 06:30:58.577307 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-hfgcf_openshift-ovn-kubernetes(caf986e7-b521-40fd-ae26-18716730d57d)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" podUID="caf986e7-b521-40fd-ae26-18716730d57d" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.598971 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:58Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.601175 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.601225 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.601242 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.601262 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.601278 5048 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:58Z","lastTransitionTime":"2025-12-13T06:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.610368 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"600ac0db-9d2a-49c8-b58b-78f6f3ed60e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3f8010716141c431c87894a8a22182ae06fd0671ad4bd6988888756f027a240d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2144a7f51ff3f41cfe760f2939d1b2d31ef9c9e4863fe8b26eaeda22a2f5ee23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2144a7f51ff3f41cfe760f2939d1b2d31ef9c9e4863fe8b26eaeda22a2f5ee23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:58Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.627057 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:58Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.641040 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:58Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.654000 5048 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:58Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.667848 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:58Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.678469 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:58Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.704871 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.704932 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.704944 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.704962 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.704975 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:58Z","lastTransitionTime":"2025-12-13T06:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.707863 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/servic
eaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87\\\",\\\"image\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1018805f5a3b3d4a892b9ec4836e2bcd7d56f8a089e1fa2767de4751fd554aca\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-13T06:30:29Z\\\",\\\"message\\\":\\\"06:30:29.506839 6780 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc after 0 failed attempt(s)\\\\nI1213 06:30:29.506847 6780 default_network_controller.go:776] Recording success event on pod openshift-kube-controller-manager/kube-controller-manager-crc\\\\nI1213 06:30:29.506799 6780 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-bdd78 in node crc\\\\nI1213 06:30:29.506858 6780 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-bdd78 after 0 failed attempt(s)\\\\nI1213 06:30:29.506859 6780 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nI1213 06:30:29.506862 6780 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-additional-cni-plugins-bdd78\\\\nI1213 06:30:29.506867 6780 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nI1213 06:30:29.506715 6780 lb_config.go:1031] Cluster endpoints for openshift-machine-api/machine-api-operator for network=default are: map[]\\\\nI1213 06:30:29.506875 6780 ovn.go:134] Ensuring zone local for Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf in node crc\\\\nI1213 06:30:29.506881 6780 
obj_retry.go:386]\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-13T06:30:58Z\\\",\\\"message\\\":\\\"Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-multus/multus-admission-controller_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-multus/multus-admission-controller\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.119\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.119\\\\\\\", Port:8443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1213 06:30:58.171372 7160 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-r42c6 in node crc\\\\nI1213 06:30:58.171386 7160 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-r42c6 after 0 failed attempt(s)\\\\nI1213 06:30:58.171397 7160 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-r42c6\\\\nI1213 06:30:58.171216 7160 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc after 
0\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d209
9482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:58Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.720986 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c880cc39f16d08a56ae903058580b30b83005a94239bc86b63ce2827d3e3338e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly
\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://63a4b5bf19a626d27b5ff5bbe0faac497702e18b0003790f0165d788dcc3326b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gdm7w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:58Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.736120 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:58Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.751971 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29eacc23b1b0315e6101fa8981ce61752594e1488b26a1dfba5310a6893d0a9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-13T06:30:40Z\\\",\\\"message\\\":\\\"2025-12-13T06:29:55+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_b318d8f2-ad45-42da-a7a4-494c1a62c42d\\\\n2025-12-13T06:29:55+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_b318d8f2-ad45-42da-a7a4-494c1a62c42d to /host/opt/cni/bin/\\\\n2025-12-13T06:29:55Z [verbose] multus-daemon started\\\\n2025-12-13T06:29:55Z [verbose] Readiness Indicator file check\\\\n2025-12-13T06:30:40Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:58Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.774365 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a
85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:58Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.787702 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:58Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.802165 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8d297a9-d6df-4f4c-bb78-7eff45697942\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bdca655a7c7ffd64ba59fb2ba574a7fef8ff566bcf1c75776b7cd93d734b694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6f921bd85ec5e159d59b8ee9e98026784f7ed189beea02e9f76fced30361160\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0db465510d0e1d5bf6f917f72d4b32608ab044e44bcf883bcfd9634d2d18ae4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719efa03a574deadefcdc3926b9aa9bf0d6f0f2ec72357b3cde3d17bf94eacd0\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://719efa03a574deadefcdc3926b9aa9bf0d6f0f2ec72357b3cde3d17bf94eacd0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:58Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.812323 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.812387 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.812400 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.812424 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.812460 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:58Z","lastTransitionTime":"2025-12-13T06:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.817306 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:58Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.833585 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:58Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.850969 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:58Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.870931 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db7dc4c24b2f287638a86fcc16432cadbd96b03b50a7cb88b8b59a178a89001a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/c
ni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:58Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.883819 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tm62z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"226b24e2-92c6-43d1-a621-09702ffa8fd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tm62z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:58Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.915566 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.915666 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.915676 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.915693 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:58 crc kubenswrapper[5048]: I1213 06:30:58.915704 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:58Z","lastTransitionTime":"2025-12-13T06:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.018100 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.018125 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.018133 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.018145 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.018153 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:59Z","lastTransitionTime":"2025-12-13T06:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.121530 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.121573 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.121584 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.121601 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.121613 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:59Z","lastTransitionTime":"2025-12-13T06:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.224940 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.224991 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.225003 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.225018 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.225029 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:59Z","lastTransitionTime":"2025-12-13T06:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.327890 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.327928 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.327938 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.327952 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.327960 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:59Z","lastTransitionTime":"2025-12-13T06:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.430994 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.431127 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.431145 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.431174 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.431198 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:59Z","lastTransitionTime":"2025-12-13T06:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.534305 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.534339 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.534349 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.534361 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.534370 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:59Z","lastTransitionTime":"2025-12-13T06:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.566866 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z"
Dec 13 06:30:59 crc kubenswrapper[5048]: E1213 06:30:59.567067 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4"
Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.581275 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hfgcf_caf986e7-b521-40fd-ae26-18716730d57d/ovnkube-controller/3.log"
Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.585071 5048 scope.go:117] "RemoveContainer" containerID="4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2"
Dec 13 06:30:59 crc kubenswrapper[5048]: E1213 06:30:59.585210 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-hfgcf_openshift-ovn-kubernetes(caf986e7-b521-40fd-ae26-18716730d57d)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" podUID="caf986e7-b521-40fd-ae26-18716730d57d"
Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.600253 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:59Z is after 2025-08-24T17:21:41Z"
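From here on, every status patch the kubelet sends fails the same way: the API server must first call the pod.network-node-identity.openshift.io webhook at https://127.0.0.1:9743, and that webhook's serving certificate expired on 2025-08-24T17:21:41Z while the node clock reads 2025-12-13T06:30:59Z. The failing check is an ordinary X.509 validity-window comparison; the sketch below reproduces it against a PEM file on disk (the webhook-cert.pem filename is a placeholder, not a path from this system).

// Minimal sketch: parse a PEM certificate and compare the current time to
// its NotBefore/NotAfter window, the same comparison crypto/x509 makes when
// it reports "x509: certificate has expired or is not yet valid".
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	pemBytes, err := os.ReadFile("webhook-cert.pem") // placeholder input
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil || block.Type != "CERTIFICATE" {
		fmt.Fprintln(os.Stderr, "no CERTIFICATE block found")
		os.Exit(1)
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	now := time.Now().UTC()
	switch {
	case now.After(cert.NotAfter):
		fmt.Printf("expired: current time %s is after %s\n",
			now.Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
	case now.Before(cert.NotBefore):
		fmt.Printf("not yet valid: current time %s is before %s\n",
			now.Format(time.RFC3339), cert.NotBefore.UTC().Format(time.RFC3339))
	default:
		fmt.Printf("valid until %s\n", cert.NotAfter.UTC().Format(time.RFC3339))
	}
}

The kubelet's status manager simply retries on failure, which is why the identical "failed calling webhook ... certificate has expired" error recurs for every pod in the records that follow.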
Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.622042 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:59Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.632733 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"600ac0db-9d2a-49c8-b58b-78f6f3ed60e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3f8010716141c431c87894a8a22182ae06fd0671ad4bd6988888756f027a240d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2144a7f51ff3f41cfe760f2939d1b2d31ef9c9e4863fe8b26eaeda22a2f5ee23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2144a7f51ff3f41cfe760f2939d1b2d31ef9c9e4863fe8b26eaeda22a2f5ee23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:59Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.636476 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.636526 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.636539 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.636558 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.636567 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:59Z","lastTransitionTime":"2025-12-13T06:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.644597 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:59Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.657146 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:59Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.667559 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:59Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.675341 5048 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:59Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.691673 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-13T06:30:58Z\\\",\\\"message\\\":\\\"Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-multus/multus-admission-controller_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-multus/multus-admission-controller\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.119\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.119\\\\\\\", Port:8443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1213 06:30:58.171372 7160 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-r42c6 in node crc\\\\nI1213 06:30:58.171386 7160 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-r42c6 after 0 failed attempt(s)\\\\nI1213 06:30:58.171397 7160 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-r42c6\\\\nI1213 06:30:58.171216 7160 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc after 0\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:57Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hfgcf_openshift-ovn-kubernetes(caf986e7-b521-40fd-ae26-18716730d57d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:59Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.704895 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c880cc39f16d08a56ae903058580b30b83005a94239bc86b63ce2827d3e3338e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://63a4b5bf19a626d27b5ff5bbe0faac497702e18b0003790f0165d788dcc3326b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gdm7w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:59Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.716177 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:59Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.729614 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:59Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.739908 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.739949 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.739958 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.739973 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.739984 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:59Z","lastTransitionTime":"2025-12-13T06:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.742745 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:59Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.758317 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:59Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.773880 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29eacc23b1b0315e6101fa8981ce61752594e1488b26a1dfba5310a6893d0a9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-13T06:30:40Z\\\",\\\"message\\\":\\\"2025-12-13T06:29:55+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_b318d8f2-ad45-42da-a7a4-494c1a62c42d\\\\n2025-12-13T06:29:55+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_b318d8f2-ad45-42da-a7a4-494c1a62c42d to /host/opt/cni/bin/\\\\n2025-12-13T06:29:55Z [verbose] 
multus-daemon started\\\\n2025-12-13T06:29:55Z [verbose] Readiness Indicator file check\\\\n2025-12-13T06:30:40Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:59Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.799591 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a
85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:59Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.815342 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:59Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.829195 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8d297a9-d6df-4f4c-bb78-7eff45697942\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bdca655a7c7ffd64ba59fb2ba574a7fef8ff566bcf1c75776b7cd93d734b694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6f921bd85ec5e159d59b8ee9e98026784f7ed189beea02e9f76fced30361160\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0db465510d0e1d5bf6f917f72d4b32608ab044e44bcf883bcfd9634d2d18ae4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719efa03a574deadefcdc3926b9aa9bf0d6f0f2ec72357b3cde3d17bf94eacd0\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://719efa03a574deadefcdc3926b9aa9bf0d6f0f2ec72357b3cde3d17bf94eacd0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:59Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.842518 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.842559 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.842594 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.842613 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.842626 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:59Z","lastTransitionTime":"2025-12-13T06:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.845699 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db7dc4c24b2f287638a86fcc16432cadbd96b03b50a7cb88b8b59a178a89001a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:59Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.855984 5048 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/network-metrics-daemon-tm62z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"226b24e2-92c6-43d1-a621-09702ffa8fd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tm62z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:30:59Z is after 2025-08-24T17:21:41Z" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.945659 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.945711 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.945728 5048 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.945748 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:30:59 crc kubenswrapper[5048]: I1213 06:30:59.945760 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:30:59Z","lastTransitionTime":"2025-12-13T06:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.048158 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.048198 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.048210 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.048226 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.048261 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:00Z","lastTransitionTime":"2025-12-13T06:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.150630 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.150668 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.150680 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.150695 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.150705 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:00Z","lastTransitionTime":"2025-12-13T06:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.252990 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.253035 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.253048 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.253064 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.253075 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:00Z","lastTransitionTime":"2025-12-13T06:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.355470 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.355517 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.355529 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.355549 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.355563 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:00Z","lastTransitionTime":"2025-12-13T06:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.458469 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.458503 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.458513 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.458529 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.458540 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:00Z","lastTransitionTime":"2025-12-13T06:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.561522 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.561591 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.561600 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.561614 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.561623 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:00Z","lastTransitionTime":"2025-12-13T06:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.568830 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.569002 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:31:00 crc kubenswrapper[5048]: E1213 06:31:00.569095 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.569169 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:31:00 crc kubenswrapper[5048]: E1213 06:31:00.569252 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:31:00 crc kubenswrapper[5048]: E1213 06:31:00.568993 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.663798 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.664092 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.664178 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.664264 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.664339 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:00Z","lastTransitionTime":"2025-12-13T06:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.767220 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.767267 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.767291 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.767306 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.767320 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:00Z","lastTransitionTime":"2025-12-13T06:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.874417 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.874471 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.874483 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.874497 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:31:00 crc kubenswrapper[5048]: I1213 06:31:00.874507 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:00Z","lastTransitionTime":"2025-12-13T06:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
[... the five-entry cycle above (four "Recording event message for node" entries for NodeHasSufficientMemory, NodeHasNoDiskPressure, NodeHasSufficientPID, NodeNotReady, plus one "Node became not ready" entry) repeats roughly every 100 ms from 06:31:00.978 through 06:31:06.426 with only the timestamps changing; the distinct entries interleaved in that window are preserved below ...]
Dec 13 06:31:01 crc kubenswrapper[5048]: I1213 06:31:01.566692 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z"
Dec 13 06:31:01 crc kubenswrapper[5048]: E1213 06:31:01.566839 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4"
Dec 13 06:31:02 crc kubenswrapper[5048]: I1213 06:31:02.565834 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 13 06:31:02 crc kubenswrapper[5048]: I1213 06:31:02.565937 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 13 06:31:02 crc kubenswrapper[5048]: E1213 06:31:02.565975 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 13 06:31:02 crc kubenswrapper[5048]: I1213 06:31:02.566059 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 13 06:31:02 crc kubenswrapper[5048]: E1213 06:31:02.566229 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 13 06:31:02 crc kubenswrapper[5048]: E1213 06:31:02.566478 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 13 06:31:03 crc kubenswrapper[5048]: I1213 06:31:03.566658 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z"
Dec 13 06:31:03 crc kubenswrapper[5048]: E1213 06:31:03.566829 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4"
Dec 13 06:31:04 crc kubenswrapper[5048]: I1213 06:31:04.566368 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 13 06:31:04 crc kubenswrapper[5048]: I1213 06:31:04.566522 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 13 06:31:04 crc kubenswrapper[5048]: E1213 06:31:04.566678 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 13 06:31:04 crc kubenswrapper[5048]: I1213 06:31:04.567030 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 13 06:31:04 crc kubenswrapper[5048]: E1213 06:31:04.567248 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 13 06:31:04 crc kubenswrapper[5048]: E1213 06:31:04.567339 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 13 06:31:05 crc kubenswrapper[5048]: I1213 06:31:05.566188 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z"
Dec 13 06:31:05 crc kubenswrapper[5048]: E1213 06:31:05.566316 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4"
Dec 13 06:31:06 crc kubenswrapper[5048]: E1213 06:31:06.439761 5048 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6aaa1fdd-aec3-41cf-bed2-ae7f1a625255\\\",\\\"systemUUID\\\":\\\"c40dcd55-9053-46a7-9f70-890f2d5d7520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:31:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.444388 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.444479 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.444495 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.444518 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.444535 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:06Z","lastTransitionTime":"2025-12-13T06:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:31:06 crc kubenswrapper[5048]: E1213 06:31:06.458916 5048 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6aaa1fdd-aec3-41cf-bed2-ae7f1a625255\\\",\\\"systemUUID\\\":\\\"c40dcd55-9053-46a7-9f70-890f2d5d7520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:31:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.463793 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.463853 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.463865 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.463888 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.463903 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:06Z","lastTransitionTime":"2025-12-13T06:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:31:06 crc kubenswrapper[5048]: E1213 06:31:06.478605 5048 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6aaa1fdd-aec3-41cf-bed2-ae7f1a625255\\\",\\\"systemUUID\\\":\\\"c40dcd55-9053-46a7-9f70-890f2d5d7520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:31:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.484002 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.484061 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.484074 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.484097 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.484113 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:06Z","lastTransitionTime":"2025-12-13T06:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:31:06 crc kubenswrapper[5048]: E1213 06:31:06.497996 5048 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6aaa1fdd-aec3-41cf-bed2-ae7f1a625255\\\",\\\"systemUUID\\\":\\\"c40dcd55-9053-46a7-9f70-890f2d5d7520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:31:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.502654 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.502718 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.502741 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.502770 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.502793 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:06Z","lastTransitionTime":"2025-12-13T06:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:31:06 crc kubenswrapper[5048]: E1213 06:31:06.516808 5048 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-13T06:31:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6aaa1fdd-aec3-41cf-bed2-ae7f1a625255\\\",\\\"systemUUID\\\":\\\"c40dcd55-9053-46a7-9f70-890f2d5d7520\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:31:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:31:06 crc kubenswrapper[5048]: E1213 06:31:06.517025 5048 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.518779 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.518843 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.518855 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.518873 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.518885 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:06Z","lastTransitionTime":"2025-12-13T06:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.566068 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.566110 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.566141 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:31:06 crc kubenswrapper[5048]: E1213 06:31:06.566255 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:31:06 crc kubenswrapper[5048]: E1213 06:31:06.566383 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:31:06 crc kubenswrapper[5048]: E1213 06:31:06.566544 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.583918 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e03fc33-848d-423d-85b3-4c2a578da740\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f8bbaeb69f92899b0871e249b7fcfc4ce5e56ea95172947cfa16523fc04537d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4b0f0b01d00b1912bc06407d22819b9ccc8fb17109ed344c618856f1e46b698\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/st
atic-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c74bf511731163f1506cc114d801b35a53498e3c6c61df6853f57d629259f28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:31:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.598132 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8d297a9-d6df-4f4c-bb78-7eff45697942\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bdca655a7c7ffd64ba59fb2ba574a7fef8ff566bcf1c75776b7cd93d734b694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6f921bd85ec5e159d59b8ee9e98026784f7ed189beea02e9f76fced30361160\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0db465510d0e1d5bf6f917f72d4b32608ab044e44bcf883bcfd9634d2d18ae4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719efa03a574deadefcdc3926b9aa9bf0d6f0f2ec72357b3cde3d17bf94eacd0\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://719efa03a574deadefcdc3926b9aa9bf0d6f0f2ec72357b3cde3d17bf94eacd0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:31:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.614796 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9cb898ea4a592dd2b1a937bce43e398147c7a53a731e7d4439c1b46b720a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:31:06Z is after 2025-08-24T17:21:41Z" Dec 13 
06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.621181 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.621251 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.621274 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.621306 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.621330 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:06Z","lastTransitionTime":"2025-12-13T06:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.631636 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13df96a7a48b8b26f8e8df3701c0e30d6b2e51cfb3569f4804910cec82bdbad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:31:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 
06:31:06.648625 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:31:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.668836 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:31:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.689560 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-r42c6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"627477f3-8fca-4b40-ace9-68d22f6b8576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29eacc23b1b0315e6101fa8981ce61752594e1488b26a1dfba5310a6893d0a9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-13T06:30:40Z\\\",\\\"message\\\":\\\"2025-12-13T06:29:55+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_b318d8f2-ad45-42da-a7a4-494c1a62c42d\\\\n2025-12-13T06:29:55+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_b318d8f2-ad45-42da-a7a4-494c1a62c42d to /host/opt/cni/bin/\\\\n2025-12-13T06:29:55Z [verbose] 
multus-daemon started\\\\n2025-12-13T06:29:55Z [verbose] Readiness Indicator file check\\\\n2025-12-13T06:30:40Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ngfjz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-r42c6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:31:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.716594 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc6662e3-8908-4346-a938-36a0494242de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3fd74ef77556c2660bc0e0a37273d91993b14a27e226d9c224b3672f3cb9649\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a687185142018579c1359aabbdf4b606177eb717f62e0fea3eac1d7c3e286c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58d37c8715c5a1c202ea3810a605e5fafcf2b1dfa5430fbec46277e7a38e4fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb85662c8bc6d5496f580bfeee2c02c425f22a
85175c6d4c046e95c0b6c371d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4494d6f993b7c70c0aadf946685ece0c12a1d5f5fe7cb5032bd2dec8eeeed9ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7bcbd8011fbf07e75837c1db293f3ea7b84dab6e76772644f09160b03060fa5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cfe40441b2db2be5301a90ac93f8d9a699544d5167d7528df3ef0f326845f9a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://450e77e17c69ad6d7c5d9a2fba50fe90f3acf9e483bea09dc91393a2dfd5e1bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:31:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.724010 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.724046 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.724059 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.724075 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.724087 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:06Z","lastTransitionTime":"2025-12-13T06:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.727146 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tm62z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"226b24e2-92c6-43d1-a621-09702ffa8fd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhpwj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tm62z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:31:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.741990 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bdd78" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8e1f99bb-aa5e-40eb-9b21-ff04d41acf50\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db7dc4c24b2f287638a86fcc16432cadbd96b03b50a7cb88b8b59a178a89001a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa582e472b44c4aa995645fc0e97ddf5bd54ed9425b3c0ff5091a31115835ab2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://61259010c16bd1c386579d73798242dd8cc8649728da00edd753165fb0aaa5ec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4e3cfc325694d38ca8b57f815ac96ec452db470b3ccf807390ff660bdaf74c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6d93932c3728d6d61fca0b281fc848e48b226dca78dec5295042f71f1df310c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac9ec8b7df0a602d38fa1cc562f85cf884f8309c1a00a9682eb50f6663f01045\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5c2d537172a87f333dcd333ede74f32e19d28ea99991c1eb5c97858ace029f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:30:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-522qt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bdd78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:31:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.752070 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"600ac0db-9d2a-49c8-b58b-78f6f3ed60e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3f8010716141c431c87894a8a22182ae06fd0671ad4bd6988888756f027a240d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2144a7f51ff3f41cfe760f2939d1b2d31ef9c9e4863fe8b26eaeda22a2f5ee23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2144a7f51ff3f41cfe760f2939d1b2d31ef9c9e4863fe8b26eaeda22a2f5ee23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:31:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.763944 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:31:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.774678 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a52efff2a918aa69204aaa42e3a3879d034baac2b6798c954e519d0e806a318e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whdm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-j7hns\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:31:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.786774 5048 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e418ae3-9af9-445f-9b2d-c58699743512\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049
b17c84cb6dc3e9f12424d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-13T06:29:48Z\\\",\\\"message\\\":\\\"file observer\\\\nW1213 06:29:47.886462 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1213 06:29:47.886716 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1213 06:29:47.888658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-112468906/tls.crt::/tmp/serving-cert-112468906/tls.key\\\\\\\"\\\\nI1213 06:29:48.748382 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1213 06:29:48.751616 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1213 06:29:48.751690 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1213 06:29:48.751765 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1213 06:29:48.751778 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1213 06:29:48.762066 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1213 06:29:48.762052 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1213 06:29:48.762154 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762160 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1213 06:29:48.762166 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1213 06:29:48.762170 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1213 06:29:48.762173 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1213 06:29:48.762175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1213 06:29:48.767699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:30Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:26Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:31:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.798782 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kkfct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"262dc4a8-4ed0-49a1-be9e-52071ce3b6b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68bff148e57ccfa90d168aa69e99b9540fa74c31e7f4f8ae1f0a19fec3890f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-66ldb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kkfct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:31:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.812544 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://916632b8712628f1859767069eba0217046143330164434ed1f4f16ff4baf4e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7f65dd3ec50da331cbc49f1009b7b3b1783b5a075d1033c2bb2752b54f83126\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:31:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.827720 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.827781 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.827804 5048 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.827876 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.827902 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:06Z","lastTransitionTime":"2025-12-13T06:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.831869 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"caf986e7-b521-40fd-ae26-18716730d57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4898d8329d686b48431a6584164acb763d1f5890
962ea21bc4c31f4e435233c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-13T06:30:58Z\\\",\\\"message\\\":\\\"Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-multus/multus-admission-controller_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-multus/multus-admission-controller\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.119\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.119\\\\\\\", Port:8443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1213 06:30:58.171372 7160 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-r42c6 in node crc\\\\nI1213 06:30:58.171386 7160 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-r42c6 after 0 failed attempt(s)\\\\nI1213 06:30:58.171397 7160 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-r42c6\\\\nI1213 06:30:58.171216 7160 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc after 0\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-13T06:30:57Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hfgcf_openshift-ovn-kubernetes(caf986e7-b521-40fd-ae26-18716730d57d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9blq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hfgcf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:31:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.846018 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3fa9d7e0-dc6b-4884-beba-d2f5d3d6f3d9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:30:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c880cc39f16d08a56ae903058580b30b83005a94239bc86b63ce2827d3e3338e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://63a4b5bf19a626d27b5ff5bbe0faac497702e18b0003790f0165d788dcc3326b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cnrrp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:30:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gdm7w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:31:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.857794 5048 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9qddb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef455846-1e4c-4fc3-b6a7-de24f53ad9fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-13T06:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6fda04dbf3ebddf6c68758842742dc65565bc4a8bf09ced8ce52b554491235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-13T06:29:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9t2bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-13T06:29:49Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9qddb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-13T06:31:06Z is after 2025-08-24T17:21:41Z" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.931746 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.931784 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.931792 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.931807 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:31:06 crc kubenswrapper[5048]: I1213 06:31:06.931817 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:06Z","lastTransitionTime":"2025-12-13T06:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:31:07 crc kubenswrapper[5048]: I1213 06:31:07.035206 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:31:07 crc kubenswrapper[5048]: I1213 06:31:07.035346 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:31:07 crc kubenswrapper[5048]: I1213 06:31:07.035372 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:31:07 crc kubenswrapper[5048]: I1213 06:31:07.035406 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:31:07 crc kubenswrapper[5048]: I1213 06:31:07.035430 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:07Z","lastTransitionTime":"2025-12-13T06:31:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:31:07 crc kubenswrapper[5048]: I1213 06:31:07.138539 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:31:07 crc kubenswrapper[5048]: I1213 06:31:07.138592 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:31:07 crc kubenswrapper[5048]: I1213 06:31:07.138609 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:31:07 crc kubenswrapper[5048]: I1213 06:31:07.138632 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:31:07 crc kubenswrapper[5048]: I1213 06:31:07.138648 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:07Z","lastTransitionTime":"2025-12-13T06:31:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Dec 13 06:31:07 crc kubenswrapper[5048]: I1213 06:31:07.566623 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z"
Dec 13 06:31:07 crc kubenswrapper[5048]: E1213 06:31:07.566766 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4"
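Sandbox creation fails for the same reason the node is NotReady: the kubelet's network-readiness gate needs a loadable CNI network configuration in /etc/kubernetes/cni/net.d/, and the network operator has not written one yet. A rough Go sketch of that gate, for orientation only — the glob patterns are an assumption, and the real CNI plugin manager parses candidate files rather than merely globbing for them:

package main

// Approximate the readiness check behind the repeated
// NetworkPluginNotReady message: is there any network config
// in the CNI conf directory? Directory path taken from the log.

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir := "/etc/kubernetes/cni/net.d"
	var found []string
	for _, pattern := range []string{"*.conf", "*.conflist", "*.json"} {
		matches, _ := filepath.Glob(filepath.Join(dir, pattern))
		found = append(found, matches...)
	}
	if len(found) == 0 {
		fmt.Println("no CNI configuration file found; node stays NotReady")
		os.Exit(1)
	}
	fmt.Println("CNI config candidates:", found)
}

Until a file appears there, every pod that needs a network sandbox (everything without hostNetwork) will keep logging the same sync error, which is exactly the pattern in the entries that follow.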
Dec 13 06:31:08 crc kubenswrapper[5048]: I1213 06:31:08.567487 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 13 06:31:08 crc kubenswrapper[5048]: E1213 06:31:08.567675 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 13 06:31:08 crc kubenswrapper[5048]: I1213 06:31:08.567974 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 13 06:31:08 crc kubenswrapper[5048]: E1213 06:31:08.568086 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 13 06:31:08 crc kubenswrapper[5048]: I1213 06:31:08.568474 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 13 06:31:08 crc kubenswrapper[5048]: E1213 06:31:08.568581 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
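The condition={"type":"Ready",...} payload that setters.go:603 prints on every heartbeat is an ordinary Kubernetes NodeCondition. Decoding one payload copied from this log makes the fields easier to eyeball than the inline form; the struct below is a sketch with the fields trimmed to plain strings, not the real API type:

package main

// Decode one logged NodeCondition payload (copied verbatim from a
// setters.go:603 entry above) into a readable summary.

import (
	"encoding/json"
	"fmt"
	"log"
)

type NodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	payload := `{"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:08Z","lastTransitionTime":"2025-12-13T06:31:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}`
	var c NodeCondition
	if err := json.Unmarshal([]byte(payload), &c); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s=%s since %s (%s)\n", c.Type, c.Status, c.LastTransitionTime, c.Reason)
}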
Dec 13 06:31:09 crc kubenswrapper[5048]: I1213 06:31:09.073666 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/226b24e2-92c6-43d1-a621-09702ffa8fd4-metrics-certs\") pod \"network-metrics-daemon-tm62z\" (UID: \"226b24e2-92c6-43d1-a621-09702ffa8fd4\") " pod="openshift-multus/network-metrics-daemon-tm62z"
Dec 13 06:31:09 crc kubenswrapper[5048]: E1213 06:31:09.073901 5048 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Dec 13 06:31:09 crc kubenswrapper[5048]: E1213 06:31:09.074034 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/226b24e2-92c6-43d1-a621-09702ffa8fd4-metrics-certs podName:226b24e2-92c6-43d1-a621-09702ffa8fd4 nodeName:}" failed. No retries permitted until 2025-12-13 06:32:13.074001535 +0000 UTC m=+166.940596156 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/226b24e2-92c6-43d1-a621-09702ffa8fd4-metrics-certs") pod "network-metrics-daemon-tm62z" (UID: "226b24e2-92c6-43d1-a621-09702ffa8fd4") : object "openshift-multus"/"metrics-daemon-secret" not registered
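The durationBeforeRetry of 1m4s is consistent with an exponential retry schedule: 64 s is 500 ms doubled seven times, which would mean this mount has already failed about eight times since the kubelet started. A sketch of that schedule follows; the 500 ms starting delay and the roughly two-minute cap are assumptions about kubelet's volume-manager defaults, not values read from this log:

package main

// Print an exponential backoff schedule: start at 500 ms, double on
// each failure, cap at ~2 minutes. Failure 8 lands on 1m4s, matching
// the durationBeforeRetry in the log entry above.

import (
	"fmt"
	"time"
)

func main() {
	delay := 500 * time.Millisecond
	maxDelay := 2*time.Minute + 2*time.Second // assumed cap
	for failure := 1; failure <= 9; failure++ {
		fmt.Printf("failure %d -> next retry in %s\n", failure, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}

The "not registered" error itself means the kubelet's object cache has not (yet) been told to track that secret for this pod; once the pod is admitted normally and the cache registers it, the mount retries can succeed.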
Dec 13 06:31:09 crc kubenswrapper[5048]: I1213 06:31:09.565796 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z"
Dec 13 06:31:09 crc kubenswrapper[5048]: E1213 06:31:09.566084 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4"
Dec 13 06:31:10 crc kubenswrapper[5048]: I1213 06:31:10.566546 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 13 06:31:10 crc kubenswrapper[5048]: E1213 06:31:10.566937 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 13 06:31:10 crc kubenswrapper[5048]: I1213 06:31:10.566626 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 13 06:31:10 crc kubenswrapper[5048]: I1213 06:31:10.566626 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 13 06:31:10 crc kubenswrapper[5048]: E1213 06:31:10.567194 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 13 06:31:10 crc kubenswrapper[5048]: E1213 06:31:10.567298 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
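At this point the log is dominated by two repeating signatures: the NotReady heartbeat (setters.go:603) and the per-pod sandbox/sync errors (util.go:30, pod_workers.go:1301). A throwaway Go sketch for triaging a file like this one by counting entries per klog call site; the filename is an assumption (pass the path to this artifact), and the scanner buffer is enlarged because the physical lines here run to many kilobytes:

package main

// Tally klog entries by source call site (e.g. setters.go:603) so the
// heartbeat noise separates from the handful of distinct errors. The
// regexp matches the klog header shape seen throughout this file:
// I1213 06:31:10.567298 5048 pod_workers.go:1301] ...

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

var header = regexp.MustCompile(`[IWE]\d{4} \d{2}:\d{2}:\d{2}\.\d+\s+\d+\s+(\S+\.go:\d+)\]`)

func main() {
	f, err := os.Open("kubelet.log") // assumed path; adjust as needed
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer f.Close()
	counts := map[string]int{}
	sc := bufio.NewScanner(f)
	sc.Buffer(make([]byte, 0, 64*1024), 16*1024*1024) // physical lines are huge
	for sc.Scan() {
		// A single physical line may hold many journal records.
		for _, m := range header.FindAllStringSubmatch(sc.Text(), -1) {
			counts[m[1]]++
		}
	}
	for site, n := range counts {
		fmt.Printf("%8d  %s\n", n, site)
	}
}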
Has your network provider started?"} Dec 13 06:31:10 crc kubenswrapper[5048]: I1213 06:31:10.853248 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:31:10 crc kubenswrapper[5048]: I1213 06:31:10.853278 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:31:10 crc kubenswrapper[5048]: I1213 06:31:10.853286 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:31:10 crc kubenswrapper[5048]: I1213 06:31:10.853299 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:31:10 crc kubenswrapper[5048]: I1213 06:31:10.853307 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:10Z","lastTransitionTime":"2025-12-13T06:31:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:31:10 crc kubenswrapper[5048]: I1213 06:31:10.956014 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:31:10 crc kubenswrapper[5048]: I1213 06:31:10.956053 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:31:10 crc kubenswrapper[5048]: I1213 06:31:10.956063 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:31:10 crc kubenswrapper[5048]: I1213 06:31:10.956077 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:31:10 crc kubenswrapper[5048]: I1213 06:31:10.956088 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:10Z","lastTransitionTime":"2025-12-13T06:31:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:31:11 crc kubenswrapper[5048]: I1213 06:31:11.059224 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:31:11 crc kubenswrapper[5048]: I1213 06:31:11.059302 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:31:11 crc kubenswrapper[5048]: I1213 06:31:11.059326 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:31:11 crc kubenswrapper[5048]: I1213 06:31:11.059350 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:31:11 crc kubenswrapper[5048]: I1213 06:31:11.059368 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:11Z","lastTransitionTime":"2025-12-13T06:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:31:11 crc kubenswrapper[5048]: I1213 06:31:11.161749 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:31:11 crc kubenswrapper[5048]: I1213 06:31:11.161793 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:31:11 crc kubenswrapper[5048]: I1213 06:31:11.161805 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:31:11 crc kubenswrapper[5048]: I1213 06:31:11.161822 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:31:11 crc kubenswrapper[5048]: I1213 06:31:11.161833 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:11Z","lastTransitionTime":"2025-12-13T06:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:31:11 crc kubenswrapper[5048]: I1213 06:31:11.264538 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:31:11 crc kubenswrapper[5048]: I1213 06:31:11.264569 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:31:11 crc kubenswrapper[5048]: I1213 06:31:11.264577 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:31:11 crc kubenswrapper[5048]: I1213 06:31:11.264591 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:31:11 crc kubenswrapper[5048]: I1213 06:31:11.264602 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:11Z","lastTransitionTime":"2025-12-13T06:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:31:11 crc kubenswrapper[5048]: I1213 06:31:11.367542 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:31:11 crc kubenswrapper[5048]: I1213 06:31:11.367598 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:31:11 crc kubenswrapper[5048]: I1213 06:31:11.367613 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:31:11 crc kubenswrapper[5048]: I1213 06:31:11.367636 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:31:11 crc kubenswrapper[5048]: I1213 06:31:11.367653 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:11Z","lastTransitionTime":"2025-12-13T06:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:31:11 crc kubenswrapper[5048]: I1213 06:31:11.470561 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:31:11 crc kubenswrapper[5048]: I1213 06:31:11.470618 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:31:11 crc kubenswrapper[5048]: I1213 06:31:11.470625 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:31:11 crc kubenswrapper[5048]: I1213 06:31:11.470643 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:31:11 crc kubenswrapper[5048]: I1213 06:31:11.470653 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:11Z","lastTransitionTime":"2025-12-13T06:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:31:11 crc kubenswrapper[5048]: I1213 06:31:11.566975 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:31:11 crc kubenswrapper[5048]: E1213 06:31:11.567258 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:31:11 crc kubenswrapper[5048]: I1213 06:31:11.573176 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:31:11 crc kubenswrapper[5048]: I1213 06:31:11.573219 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:31:11 crc kubenswrapper[5048]: I1213 06:31:11.573228 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:31:11 crc kubenswrapper[5048]: I1213 06:31:11.573246 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:31:11 crc kubenswrapper[5048]: I1213 06:31:11.573258 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:11Z","lastTransitionTime":"2025-12-13T06:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Dec 13 06:31:12 crc kubenswrapper[5048]: I1213 06:31:12.092599 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:31:12 crc kubenswrapper[5048]: I1213 06:31:12.092684 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:31:12 crc kubenswrapper[5048]: I1213 06:31:12.092707 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:31:12 crc kubenswrapper[5048]: I1213 06:31:12.092738 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:31:12 crc kubenswrapper[5048]: I1213 06:31:12.092757 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:12Z","lastTransitionTime":"2025-12-13T06:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:31:12 crc kubenswrapper[5048]: I1213 06:31:12.566329 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 13 06:31:12 crc kubenswrapper[5048]: I1213 06:31:12.566396 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 13 06:31:12 crc kubenswrapper[5048]: I1213 06:31:12.566415 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 13 06:31:12 crc kubenswrapper[5048]: E1213 06:31:12.566576 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 13 06:31:12 crc kubenswrapper[5048]: E1213 06:31:12.566634 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 13 06:31:12 crc kubenswrapper[5048]: E1213 06:31:12.566719 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 13 06:31:13 crc kubenswrapper[5048]: I1213 06:31:13.021304 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:31:13 crc kubenswrapper[5048]: I1213 06:31:13.021346 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:31:13 crc kubenswrapper[5048]: I1213 06:31:13.021374 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:31:13 crc kubenswrapper[5048]: I1213 06:31:13.021388 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:31:13 crc kubenswrapper[5048]: I1213 06:31:13.021397 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:13Z","lastTransitionTime":"2025-12-13T06:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:31:13 crc kubenswrapper[5048]: I1213 06:31:13.565770 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z"
Dec 13 06:31:13 crc kubenswrapper[5048]: E1213 06:31:13.566012 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4"
Dec 13 06:31:13 crc kubenswrapper[5048]: I1213 06:31:13.566674 5048 scope.go:117] "RemoveContainer" containerID="4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2"
Dec 13 06:31:13 crc kubenswrapper[5048]: E1213 06:31:13.566818 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-hfgcf_openshift-ovn-kubernetes(caf986e7-b521-40fd-ae26-18716730d57d)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" podUID="caf986e7-b521-40fd-ae26-18716730d57d"
Dec 13 06:31:14 crc kubenswrapper[5048]: I1213 06:31:14.050882 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:31:14 crc kubenswrapper[5048]: I1213 06:31:14.050929 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:31:14 crc kubenswrapper[5048]: I1213 06:31:14.050937 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:31:14 crc kubenswrapper[5048]: I1213 06:31:14.050954 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:31:14 crc kubenswrapper[5048]: I1213 06:31:14.050968 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:14Z","lastTransitionTime":"2025-12-13T06:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:31:14 crc kubenswrapper[5048]: I1213 06:31:14.565899 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 13 06:31:14 crc kubenswrapper[5048]: E1213 06:31:14.566043 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 13 06:31:14 crc kubenswrapper[5048]: I1213 06:31:14.565905 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 13 06:31:14 crc kubenswrapper[5048]: E1213 06:31:14.566147 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 13 06:31:14 crc kubenswrapper[5048]: I1213 06:31:14.565909 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 13 06:31:14 crc kubenswrapper[5048]: E1213 06:31:14.566413 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 13 06:31:15 crc kubenswrapper[5048]: I1213 06:31:15.080521 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:31:15 crc kubenswrapper[5048]: I1213 06:31:15.080568 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:31:15 crc kubenswrapper[5048]: I1213 06:31:15.080581 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:31:15 crc kubenswrapper[5048]: I1213 06:31:15.080596 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:31:15 crc kubenswrapper[5048]: I1213 06:31:15.080608 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:15Z","lastTransitionTime":"2025-12-13T06:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 13 06:31:15 crc kubenswrapper[5048]: I1213 06:31:15.565895 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z"
Dec 13 06:31:15 crc kubenswrapper[5048]: E1213 06:31:15.566061 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4"
pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:31:15 crc kubenswrapper[5048]: I1213 06:31:15.596757 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:31:15 crc kubenswrapper[5048]: I1213 06:31:15.596786 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:31:15 crc kubenswrapper[5048]: I1213 06:31:15.596795 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:31:15 crc kubenswrapper[5048]: I1213 06:31:15.596809 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:31:15 crc kubenswrapper[5048]: I1213 06:31:15.596817 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:15Z","lastTransitionTime":"2025-12-13T06:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:31:15 crc kubenswrapper[5048]: I1213 06:31:15.699139 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:31:15 crc kubenswrapper[5048]: I1213 06:31:15.699183 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:31:15 crc kubenswrapper[5048]: I1213 06:31:15.699192 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:31:15 crc kubenswrapper[5048]: I1213 06:31:15.699206 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:31:15 crc kubenswrapper[5048]: I1213 06:31:15.699215 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:15Z","lastTransitionTime":"2025-12-13T06:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.007678 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.007774 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.007816 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.007848 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.007875 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:16Z","lastTransitionTime":"2025-12-13T06:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Has your network provider started?"} Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.111018 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.111080 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.111098 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.111119 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.111133 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:16Z","lastTransitionTime":"2025-12-13T06:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.213915 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.213952 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.213961 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.213976 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.213985 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:16Z","lastTransitionTime":"2025-12-13T06:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.317244 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.317317 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.317329 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.317353 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.317367 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:16Z","lastTransitionTime":"2025-12-13T06:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.420197 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.420245 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.420261 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.420283 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.420300 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:16Z","lastTransitionTime":"2025-12-13T06:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.523586 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.523630 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.523640 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.523656 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.523666 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:16Z","lastTransitionTime":"2025-12-13T06:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.566526 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.566633 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.566767 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:31:16 crc kubenswrapper[5048]: E1213 06:31:16.566914 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:31:16 crc kubenswrapper[5048]: E1213 06:31:16.567094 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:31:16 crc kubenswrapper[5048]: E1213 06:31:16.567201 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.612684 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-r42c6" podStartSLOduration=87.612664249 podStartE2EDuration="1m27.612664249s" podCreationTimestamp="2025-12-13 06:29:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:31:16.612055142 +0000 UTC m=+110.478649753" watchObservedRunningTime="2025-12-13 06:31:16.612664249 +0000 UTC m=+110.479258830" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.625842 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.625908 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.625920 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.625935 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.625946 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:16Z","lastTransitionTime":"2025-12-13T06:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.635875 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=85.635858882 podStartE2EDuration="1m25.635858882s" podCreationTimestamp="2025-12-13 06:29:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:31:16.635583195 +0000 UTC m=+110.502177796" watchObservedRunningTime="2025-12-13 06:31:16.635858882 +0000 UTC m=+110.502453463" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.651574 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=87.651556351 podStartE2EDuration="1m27.651556351s" podCreationTimestamp="2025-12-13 06:29:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:31:16.651208412 +0000 UTC m=+110.517803023" watchObservedRunningTime="2025-12-13 06:31:16.651556351 +0000 UTC m=+110.518150932" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.662140 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=53.66211929 podStartE2EDuration="53.66211929s" podCreationTimestamp="2025-12-13 06:30:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:31:16.66176353 +0000 UTC m=+110.528358151" watchObservedRunningTime="2025-12-13 06:31:16.66211929 +0000 UTC m=+110.528713881" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.668476 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.668587 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.668601 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.668617 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.668628 5048 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-13T06:31:16Z","lastTransitionTime":"2025-12-13T06:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.707142 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-zccl6"] Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.707544 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zccl6" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.714085 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.714468 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.714790 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.715272 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.743175 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-bdd78" podStartSLOduration=87.743154314 podStartE2EDuration="1m27.743154314s" podCreationTimestamp="2025-12-13 06:29:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:31:16.732836861 +0000 UTC m=+110.599431462" watchObservedRunningTime="2025-12-13 06:31:16.743154314 +0000 UTC m=+110.609748895" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.759637 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/13cc7c35-acd8-492d-b290-e39ad518c34e-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-zccl6\" (UID: \"13cc7c35-acd8-492d-b290-e39ad518c34e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zccl6" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.759675 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/13cc7c35-acd8-492d-b290-e39ad518c34e-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-zccl6\" (UID: \"13cc7c35-acd8-492d-b290-e39ad518c34e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zccl6" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.759708 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/13cc7c35-acd8-492d-b290-e39ad518c34e-service-ca\") pod \"cluster-version-operator-5c965bbfc6-zccl6\" (UID: \"13cc7c35-acd8-492d-b290-e39ad518c34e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zccl6" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.759726 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/13cc7c35-acd8-492d-b290-e39ad518c34e-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-zccl6\" (UID: \"13cc7c35-acd8-492d-b290-e39ad518c34e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zccl6" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.759741 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/13cc7c35-acd8-492d-b290-e39ad518c34e-kube-api-access\") pod 
\"cluster-version-operator-5c965bbfc6-zccl6\" (UID: \"13cc7c35-acd8-492d-b290-e39ad518c34e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zccl6" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.767260 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=87.767237131 podStartE2EDuration="1m27.767237131s" podCreationTimestamp="2025-12-13 06:29:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:31:16.75619906 +0000 UTC m=+110.622793661" watchObservedRunningTime="2025-12-13 06:31:16.767237131 +0000 UTC m=+110.633831722" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.769245 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=45.769230995 podStartE2EDuration="45.769230995s" podCreationTimestamp="2025-12-13 06:30:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:31:16.767234471 +0000 UTC m=+110.633829052" watchObservedRunningTime="2025-12-13 06:31:16.769230995 +0000 UTC m=+110.635825576" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.795865 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podStartSLOduration=87.795843552 podStartE2EDuration="1m27.795843552s" podCreationTimestamp="2025-12-13 06:29:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:31:16.795832382 +0000 UTC m=+110.662426983" watchObservedRunningTime="2025-12-13 06:31:16.795843552 +0000 UTC m=+110.662438133" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.822619 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-kkfct" podStartSLOduration=87.822602614 podStartE2EDuration="1m27.822602614s" podCreationTimestamp="2025-12-13 06:29:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:31:16.822342006 +0000 UTC m=+110.688936607" watchObservedRunningTime="2025-12-13 06:31:16.822602614 +0000 UTC m=+110.689197195" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.855739 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-9qddb" podStartSLOduration=87.855722608 podStartE2EDuration="1m27.855722608s" podCreationTimestamp="2025-12-13 06:29:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:31:16.834261832 +0000 UTC m=+110.700856413" watchObservedRunningTime="2025-12-13 06:31:16.855722608 +0000 UTC m=+110.722317189" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.861129 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/13cc7c35-acd8-492d-b290-e39ad518c34e-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-zccl6\" (UID: \"13cc7c35-acd8-492d-b290-e39ad518c34e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zccl6" Dec 13 06:31:16 crc 
kubenswrapper[5048]: I1213 06:31:16.861163 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/13cc7c35-acd8-492d-b290-e39ad518c34e-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-zccl6\" (UID: \"13cc7c35-acd8-492d-b290-e39ad518c34e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zccl6" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.861186 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/13cc7c35-acd8-492d-b290-e39ad518c34e-service-ca\") pod \"cluster-version-operator-5c965bbfc6-zccl6\" (UID: \"13cc7c35-acd8-492d-b290-e39ad518c34e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zccl6" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.861204 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/13cc7c35-acd8-492d-b290-e39ad518c34e-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-zccl6\" (UID: \"13cc7c35-acd8-492d-b290-e39ad518c34e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zccl6" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.861221 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/13cc7c35-acd8-492d-b290-e39ad518c34e-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-zccl6\" (UID: \"13cc7c35-acd8-492d-b290-e39ad518c34e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zccl6" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.861489 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/13cc7c35-acd8-492d-b290-e39ad518c34e-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-zccl6\" (UID: \"13cc7c35-acd8-492d-b290-e39ad518c34e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zccl6" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.861533 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/13cc7c35-acd8-492d-b290-e39ad518c34e-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-zccl6\" (UID: \"13cc7c35-acd8-492d-b290-e39ad518c34e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zccl6" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.862537 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/13cc7c35-acd8-492d-b290-e39ad518c34e-service-ca\") pod \"cluster-version-operator-5c965bbfc6-zccl6\" (UID: \"13cc7c35-acd8-492d-b290-e39ad518c34e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zccl6" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.871978 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gdm7w" podStartSLOduration=86.871962002 podStartE2EDuration="1m26.871962002s" podCreationTimestamp="2025-12-13 06:29:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:31:16.871198661 +0000 UTC m=+110.737793262" watchObservedRunningTime="2025-12-13 06:31:16.871962002 +0000 UTC 
m=+110.738556583" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.879070 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/13cc7c35-acd8-492d-b290-e39ad518c34e-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-zccl6\" (UID: \"13cc7c35-acd8-492d-b290-e39ad518c34e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zccl6" Dec 13 06:31:16 crc kubenswrapper[5048]: I1213 06:31:16.882755 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/13cc7c35-acd8-492d-b290-e39ad518c34e-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-zccl6\" (UID: \"13cc7c35-acd8-492d-b290-e39ad518c34e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zccl6" Dec 13 06:31:17 crc kubenswrapper[5048]: I1213 06:31:17.022222 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zccl6" Dec 13 06:31:17 crc kubenswrapper[5048]: I1213 06:31:17.566347 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:31:17 crc kubenswrapper[5048]: E1213 06:31:17.566841 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:31:17 crc kubenswrapper[5048]: I1213 06:31:17.645160 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zccl6" event={"ID":"13cc7c35-acd8-492d-b290-e39ad518c34e","Type":"ContainerStarted","Data":"69feb7d0925fba3ed9565a33605eb522d325007787ce4f0049103c33d03b07c4"} Dec 13 06:31:17 crc kubenswrapper[5048]: I1213 06:31:17.645245 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zccl6" event={"ID":"13cc7c35-acd8-492d-b290-e39ad518c34e","Type":"ContainerStarted","Data":"6a33a4ab31172e058dea731dd37aa651adb483dbba904d0a9eb17f807e8b51c9"} Dec 13 06:31:18 crc kubenswrapper[5048]: I1213 06:31:18.565825 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:31:18 crc kubenswrapper[5048]: I1213 06:31:18.565896 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:31:18 crc kubenswrapper[5048]: E1213 06:31:18.566045 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:31:18 crc kubenswrapper[5048]: I1213 06:31:18.566133 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:31:18 crc kubenswrapper[5048]: E1213 06:31:18.566280 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:31:18 crc kubenswrapper[5048]: E1213 06:31:18.566674 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:31:19 crc kubenswrapper[5048]: I1213 06:31:19.565914 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:31:19 crc kubenswrapper[5048]: E1213 06:31:19.566583 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:31:20 crc kubenswrapper[5048]: I1213 06:31:20.565859 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:31:20 crc kubenswrapper[5048]: I1213 06:31:20.565872 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:31:20 crc kubenswrapper[5048]: E1213 06:31:20.566049 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:31:20 crc kubenswrapper[5048]: E1213 06:31:20.566203 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:31:20 crc kubenswrapper[5048]: I1213 06:31:20.567023 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:31:20 crc kubenswrapper[5048]: E1213 06:31:20.567163 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:31:21 crc kubenswrapper[5048]: I1213 06:31:21.566450 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:31:21 crc kubenswrapper[5048]: E1213 06:31:21.566607 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:31:22 crc kubenswrapper[5048]: I1213 06:31:22.566838 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:31:22 crc kubenswrapper[5048]: I1213 06:31:22.566903 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:31:22 crc kubenswrapper[5048]: E1213 06:31:22.567409 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:31:22 crc kubenswrapper[5048]: I1213 06:31:22.566946 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:31:22 crc kubenswrapper[5048]: E1213 06:31:22.567620 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:31:22 crc kubenswrapper[5048]: E1213 06:31:22.567643 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:31:23 crc kubenswrapper[5048]: I1213 06:31:23.566309 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:31:23 crc kubenswrapper[5048]: E1213 06:31:23.566526 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:31:24 crc kubenswrapper[5048]: I1213 06:31:24.566790 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:31:24 crc kubenswrapper[5048]: E1213 06:31:24.566933 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:31:24 crc kubenswrapper[5048]: I1213 06:31:24.566810 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:31:24 crc kubenswrapper[5048]: I1213 06:31:24.567031 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:31:24 crc kubenswrapper[5048]: E1213 06:31:24.567122 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:31:24 crc kubenswrapper[5048]: E1213 06:31:24.567208 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:31:25 crc kubenswrapper[5048]: I1213 06:31:25.566615 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:31:25 crc kubenswrapper[5048]: E1213 06:31:25.566899 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:31:26 crc kubenswrapper[5048]: E1213 06:31:26.521152 5048 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Dec 13 06:31:26 crc kubenswrapper[5048]: I1213 06:31:26.566095 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 13 06:31:26 crc kubenswrapper[5048]: I1213 06:31:26.566151 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 13 06:31:26 crc kubenswrapper[5048]: I1213 06:31:26.566112 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 13 06:31:26 crc kubenswrapper[5048]: E1213 06:31:26.567550 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 13 06:31:26 crc kubenswrapper[5048]: E1213 06:31:26.567800 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 13 06:31:26 crc kubenswrapper[5048]: E1213 06:31:26.568415 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 13 06:31:26 crc kubenswrapper[5048]: I1213 06:31:26.569103 5048 scope.go:117] "RemoveContainer" containerID="4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2"
Dec 13 06:31:26 crc kubenswrapper[5048]: E1213 06:31:26.569369 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-hfgcf_openshift-ovn-kubernetes(caf986e7-b521-40fd-ae26-18716730d57d)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" podUID="caf986e7-b521-40fd-ae26-18716730d57d"
Dec 13 06:31:26 crc kubenswrapper[5048]: I1213 06:31:26.676026 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-r42c6_627477f3-8fca-4b40-ace9-68d22f6b8576/kube-multus/1.log"
Dec 13 06:31:26 crc kubenswrapper[5048]: I1213 06:31:26.676799 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-r42c6_627477f3-8fca-4b40-ace9-68d22f6b8576/kube-multus/0.log"
Dec 13 06:31:26 crc kubenswrapper[5048]: I1213 06:31:26.676876 5048 generic.go:334] "Generic (PLEG): container finished" podID="627477f3-8fca-4b40-ace9-68d22f6b8576" containerID="29eacc23b1b0315e6101fa8981ce61752594e1488b26a1dfba5310a6893d0a9e" exitCode=1
Dec 13 06:31:26 crc kubenswrapper[5048]: I1213 06:31:26.676939 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-r42c6" event={"ID":"627477f3-8fca-4b40-ace9-68d22f6b8576","Type":"ContainerDied","Data":"29eacc23b1b0315e6101fa8981ce61752594e1488b26a1dfba5310a6893d0a9e"}
Dec 13 06:31:26 crc kubenswrapper[5048]: I1213 06:31:26.676999 5048 scope.go:117] "RemoveContainer" containerID="c5e192e26e80e4d5bf1d350119b373cc884db1fe9e805a7b4503f9edde662f6a"
Dec 13 06:31:26 crc kubenswrapper[5048]: I1213 06:31:26.677854 5048 scope.go:117] "RemoveContainer" containerID="29eacc23b1b0315e6101fa8981ce61752594e1488b26a1dfba5310a6893d0a9e"
Dec 13 06:31:26 crc kubenswrapper[5048]: E1213 06:31:26.679066 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-r42c6_openshift-multus(627477f3-8fca-4b40-ace9-68d22f6b8576)\"" pod="openshift-multus/multus-r42c6" podUID="627477f3-8fca-4b40-ace9-68d22f6b8576"
Dec 13 06:31:26 crc kubenswrapper[5048]: I1213 06:31:26.702790 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zccl6" podStartSLOduration=97.702765084 podStartE2EDuration="1m37.702765084s" podCreationTimestamp="2025-12-13 06:29:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:31:17.687741578 +0000 UTC m=+111.554336179" watchObservedRunningTime="2025-12-13 06:31:26.702765084 +0000 UTC m=+120.569359705"
Dec 13 06:31:26 crc kubenswrapper[5048]: E1213 06:31:26.704583 5048 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Dec 13 06:31:27 crc kubenswrapper[5048]: I1213 06:31:27.566632 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:31:27 crc kubenswrapper[5048]: E1213 06:31:27.566780 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:31:27 crc kubenswrapper[5048]: I1213 06:31:27.688308 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-r42c6_627477f3-8fca-4b40-ace9-68d22f6b8576/kube-multus/1.log" Dec 13 06:31:28 crc kubenswrapper[5048]: I1213 06:31:28.565797 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:31:28 crc kubenswrapper[5048]: I1213 06:31:28.565848 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:31:28 crc kubenswrapper[5048]: I1213 06:31:28.565902 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:31:28 crc kubenswrapper[5048]: E1213 06:31:28.565946 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:31:28 crc kubenswrapper[5048]: E1213 06:31:28.566051 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:31:28 crc kubenswrapper[5048]: E1213 06:31:28.566056 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:31:29 crc kubenswrapper[5048]: I1213 06:31:29.566824 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:31:29 crc kubenswrapper[5048]: E1213 06:31:29.566995 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:31:30 crc kubenswrapper[5048]: I1213 06:31:30.565850 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:31:30 crc kubenswrapper[5048]: I1213 06:31:30.565907 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:31:30 crc kubenswrapper[5048]: I1213 06:31:30.565947 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:31:30 crc kubenswrapper[5048]: E1213 06:31:30.565999 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:31:30 crc kubenswrapper[5048]: E1213 06:31:30.566118 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:31:30 crc kubenswrapper[5048]: E1213 06:31:30.566211 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:31:31 crc kubenswrapper[5048]: I1213 06:31:31.566623 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:31:31 crc kubenswrapper[5048]: E1213 06:31:31.566815 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:31:31 crc kubenswrapper[5048]: E1213 06:31:31.705978 5048 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 13 06:31:32 crc kubenswrapper[5048]: I1213 06:31:32.566151 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:31:32 crc kubenswrapper[5048]: I1213 06:31:32.566199 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:31:32 crc kubenswrapper[5048]: I1213 06:31:32.566308 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:31:32 crc kubenswrapper[5048]: E1213 06:31:32.566315 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:31:32 crc kubenswrapper[5048]: E1213 06:31:32.566369 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:31:32 crc kubenswrapper[5048]: E1213 06:31:32.567084 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:31:33 crc kubenswrapper[5048]: I1213 06:31:33.566264 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:31:33 crc kubenswrapper[5048]: E1213 06:31:33.566602 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:31:34 crc kubenswrapper[5048]: I1213 06:31:34.566317 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:31:34 crc kubenswrapper[5048]: I1213 06:31:34.566317 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:31:34 crc kubenswrapper[5048]: I1213 06:31:34.567005 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:31:34 crc kubenswrapper[5048]: E1213 06:31:34.567187 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:31:34 crc kubenswrapper[5048]: E1213 06:31:34.567247 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:31:34 crc kubenswrapper[5048]: E1213 06:31:34.567312 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:31:35 crc kubenswrapper[5048]: I1213 06:31:35.565964 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:31:35 crc kubenswrapper[5048]: E1213 06:31:35.567148 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:31:36 crc kubenswrapper[5048]: I1213 06:31:36.566717 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:31:36 crc kubenswrapper[5048]: I1213 06:31:36.566743 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:31:36 crc kubenswrapper[5048]: I1213 06:31:36.566785 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:31:36 crc kubenswrapper[5048]: E1213 06:31:36.568616 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:31:36 crc kubenswrapper[5048]: E1213 06:31:36.568735 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:31:36 crc kubenswrapper[5048]: E1213 06:31:36.569032 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:31:36 crc kubenswrapper[5048]: E1213 06:31:36.706749 5048 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 13 06:31:37 crc kubenswrapper[5048]: I1213 06:31:37.566461 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:31:37 crc kubenswrapper[5048]: E1213 06:31:37.566600 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:31:38 crc kubenswrapper[5048]: I1213 06:31:38.566144 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:31:38 crc kubenswrapper[5048]: I1213 06:31:38.566285 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:31:38 crc kubenswrapper[5048]: E1213 06:31:38.566452 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:31:38 crc kubenswrapper[5048]: I1213 06:31:38.566916 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:31:38 crc kubenswrapper[5048]: E1213 06:31:38.566997 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:31:38 crc kubenswrapper[5048]: E1213 06:31:38.567091 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:31:38 crc kubenswrapper[5048]: I1213 06:31:38.567643 5048 scope.go:117] "RemoveContainer" containerID="4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2" Dec 13 06:31:39 crc kubenswrapper[5048]: I1213 06:31:39.566481 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:31:39 crc kubenswrapper[5048]: I1213 06:31:39.566875 5048 scope.go:117] "RemoveContainer" containerID="29eacc23b1b0315e6101fa8981ce61752594e1488b26a1dfba5310a6893d0a9e" Dec 13 06:31:39 crc kubenswrapper[5048]: E1213 06:31:39.567021 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:31:39 crc kubenswrapper[5048]: I1213 06:31:39.683803 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-tm62z"] Dec 13 06:31:39 crc kubenswrapper[5048]: I1213 06:31:39.726717 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hfgcf_caf986e7-b521-40fd-ae26-18716730d57d/ovnkube-controller/3.log" Dec 13 06:31:39 crc kubenswrapper[5048]: I1213 06:31:39.730010 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" event={"ID":"caf986e7-b521-40fd-ae26-18716730d57d","Type":"ContainerStarted","Data":"0a177f1f0d21c69f5e5d3f6754642d257a3183cd0c5c26becbe2f0a4d2decc7a"} Dec 13 06:31:39 crc kubenswrapper[5048]: I1213 06:31:39.730169 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:31:39 crc kubenswrapper[5048]: I1213 06:31:39.730480 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:31:39 crc kubenswrapper[5048]: E1213 06:31:39.730610 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:31:39 crc kubenswrapper[5048]: I1213 06:31:39.766426 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" podStartSLOduration=110.766403873 podStartE2EDuration="1m50.766403873s" podCreationTimestamp="2025-12-13 06:29:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:31:39.766391633 +0000 UTC m=+133.632986244" watchObservedRunningTime="2025-12-13 06:31:39.766403873 +0000 UTC m=+133.632998454" Dec 13 06:31:40 crc kubenswrapper[5048]: I1213 06:31:40.565781 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:31:40 crc kubenswrapper[5048]: I1213 06:31:40.565891 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:31:40 crc kubenswrapper[5048]: E1213 06:31:40.566204 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:31:40 crc kubenswrapper[5048]: E1213 06:31:40.566258 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:31:40 crc kubenswrapper[5048]: I1213 06:31:40.565906 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:31:40 crc kubenswrapper[5048]: E1213 06:31:40.566311 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:31:40 crc kubenswrapper[5048]: I1213 06:31:40.734592 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-r42c6_627477f3-8fca-4b40-ace9-68d22f6b8576/kube-multus/1.log" Dec 13 06:31:40 crc kubenswrapper[5048]: I1213 06:31:40.734686 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-r42c6" event={"ID":"627477f3-8fca-4b40-ace9-68d22f6b8576","Type":"ContainerStarted","Data":"595fa411012d7094d4f57667cd82ebf05e244f51840ad40e113a7580ee0a8b79"} Dec 13 06:31:41 crc kubenswrapper[5048]: I1213 06:31:41.565825 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:31:41 crc kubenswrapper[5048]: E1213 06:31:41.565962 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:31:41 crc kubenswrapper[5048]: E1213 06:31:41.708987 5048 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 13 06:31:42 crc kubenswrapper[5048]: I1213 06:31:42.565811 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:31:42 crc kubenswrapper[5048]: I1213 06:31:42.565874 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:31:42 crc kubenswrapper[5048]: E1213 06:31:42.565962 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:31:42 crc kubenswrapper[5048]: I1213 06:31:42.566129 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:31:42 crc kubenswrapper[5048]: E1213 06:31:42.566180 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:31:42 crc kubenswrapper[5048]: E1213 06:31:42.566309 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:31:43 crc kubenswrapper[5048]: I1213 06:31:43.565868 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:31:43 crc kubenswrapper[5048]: E1213 06:31:43.566113 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:31:44 crc kubenswrapper[5048]: I1213 06:31:44.565983 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:31:44 crc kubenswrapper[5048]: I1213 06:31:44.565986 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:31:44 crc kubenswrapper[5048]: E1213 06:31:44.566127 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:31:44 crc kubenswrapper[5048]: I1213 06:31:44.566210 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:31:44 crc kubenswrapper[5048]: E1213 06:31:44.567744 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:31:44 crc kubenswrapper[5048]: E1213 06:31:44.567683 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:31:45 crc kubenswrapper[5048]: I1213 06:31:45.566787 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:31:45 crc kubenswrapper[5048]: E1213 06:31:45.566951 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tm62z" podUID="226b24e2-92c6-43d1-a621-09702ffa8fd4" Dec 13 06:31:46 crc kubenswrapper[5048]: I1213 06:31:46.566018 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:31:46 crc kubenswrapper[5048]: E1213 06:31:46.567532 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 13 06:31:46 crc kubenswrapper[5048]: I1213 06:31:46.567560 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:31:46 crc kubenswrapper[5048]: I1213 06:31:46.567675 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:31:46 crc kubenswrapper[5048]: E1213 06:31:46.568172 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 13 06:31:46 crc kubenswrapper[5048]: E1213 06:31:46.568271 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 13 06:31:46 crc kubenswrapper[5048]: I1213 06:31:46.965229 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.004145 5048 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.039345 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.041925 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.044338 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.044342 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.045459 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgc8n"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.045924 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgc8n" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.047809 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-svwnq"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.048416 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-svwnq" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.057977 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.058540 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.058669 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.058900 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.059006 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.059114 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.059477 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.060853 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-7ch79"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.061501 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-qcdrc"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.061811 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-qcdrc" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.079030 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7ch79" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.081064 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.082038 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.086276 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.086370 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.086685 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.086949 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.112520 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.113730 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.113818 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.113902 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.114694 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.117288 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-7w8rw"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.118005 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-7w8rw" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.119549 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.123277 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-5nt4n"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.123849 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-x5sc8"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.124357 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-x5sc8" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.124857 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-5nt4n" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.131322 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.132116 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.133827 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.133922 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.133953 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.134083 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-vtxwp"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.134486 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.134662 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-vtxwp" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.134667 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.134710 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.134959 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5qgj6"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.135589 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5qgj6" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.137632 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-7pgnt"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.137964 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-hxskn"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.138013 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.138347 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-lzrgn"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.138614 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lzrgn" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.138673 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-7pgnt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.138715 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.139317 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.139491 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.144177 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.145825 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.146044 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.146208 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.146356 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.151717 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.151943 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-xclb7"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.152302 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-dwk4m"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.152643 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.152971 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-xclb7" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.154167 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.154449 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.157142 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-75h2c"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.157902 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-w5fph"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.158384 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-w5fph" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.158646 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-g9bxc"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.159266 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-g9bxc" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.160143 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-t2n95"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.169016 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-t2n95" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.170698 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-75h2c" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.172078 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-7z7xz"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.174769 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.176402 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.176617 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.176880 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.177016 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.177152 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.178855 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.183119 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.184245 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.184576 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.186163 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.189202 5048 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-console"/"console-dockercfg-f62pw" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.202945 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.205777 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.206904 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.207155 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.208322 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-x4k67"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.208358 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.208887 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.209043 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.209579 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.209851 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.210117 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.210260 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.210475 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.210602 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.210714 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.210836 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.211035 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.211242 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.211420 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Dec 13 06:31:47 crc 
Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.211739 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.211807 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.211905 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx"
Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.211974 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config"
Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.212059 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt"
Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.212113 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt"
Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.212140 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.212203 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.212270 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.212338 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert"
Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.212395 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1"
Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.212503 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert"
Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.212591 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w"
Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.212715 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z"
Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.212735 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls"
Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.211050 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-x4k67"
Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-x4k67" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.212281 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.211753 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.211739 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.213092 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.213146 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.213187 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.212744 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.213242 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.215057 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/48280256-4ec4-4530-a2f1-8bee7c6b4871-audit-dir\") pod \"apiserver-7bbb656c7d-ngbkz\" (UID: \"48280256-4ec4-4530-a2f1-8bee7c6b4871\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.215101 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/48280256-4ec4-4530-a2f1-8bee7c6b4871-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-ngbkz\" (UID: \"48280256-4ec4-4530-a2f1-8bee7c6b4871\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.215145 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rf5gj\" (UniqueName: \"kubernetes.io/projected/4fe5a2bb-7e2b-4cf4-bb1b-a0be5c9027e3-kube-api-access-rf5gj\") pod \"cluster-image-registry-operator-dc59b4c8b-kgc8n\" (UID: \"4fe5a2bb-7e2b-4cf4-bb1b-a0be5c9027e3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgc8n" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.215170 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/48280256-4ec4-4530-a2f1-8bee7c6b4871-etcd-client\") pod \"apiserver-7bbb656c7d-ngbkz\" (UID: \"48280256-4ec4-4530-a2f1-8bee7c6b4871\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.215191 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: 
\"kubernetes.io/configmap/6567f364-4ab2-489b-837f-1e7c194f311d-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-qcdrc\" (UID: \"6567f364-4ab2-489b-837f-1e7c194f311d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-qcdrc" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.215211 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6567f364-4ab2-489b-837f-1e7c194f311d-client-ca\") pod \"controller-manager-879f6c89f-qcdrc\" (UID: \"6567f364-4ab2-489b-837f-1e7c194f311d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-qcdrc" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.215235 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6567f364-4ab2-489b-837f-1e7c194f311d-config\") pod \"controller-manager-879f6c89f-qcdrc\" (UID: \"6567f364-4ab2-489b-837f-1e7c194f311d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-qcdrc" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.215255 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/4fe5a2bb-7e2b-4cf4-bb1b-a0be5c9027e3-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-kgc8n\" (UID: \"4fe5a2bb-7e2b-4cf4-bb1b-a0be5c9027e3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgc8n" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.215276 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/48280256-4ec4-4530-a2f1-8bee7c6b4871-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-ngbkz\" (UID: \"48280256-4ec4-4530-a2f1-8bee7c6b4871\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.215312 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dklj9\" (UniqueName: \"kubernetes.io/projected/bfeb748d-a196-4696-a599-70a6386cb89b-kube-api-access-dklj9\") pod \"machine-approver-56656f9798-7ch79\" (UID: \"bfeb748d-a196-4696-a599-70a6386cb89b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7ch79" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.215332 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/48280256-4ec4-4530-a2f1-8bee7c6b4871-serving-cert\") pod \"apiserver-7bbb656c7d-ngbkz\" (UID: \"48280256-4ec4-4530-a2f1-8bee7c6b4871\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.215350 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5ed0932-7c88-4245-9403-8a0e72659f59-config\") pod \"openshift-apiserver-operator-796bbdcf4f-svwnq\" (UID: \"b5ed0932-7c88-4245-9403-8a0e72659f59\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-svwnq" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.215380 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: 
\"kubernetes.io/secret/bfeb748d-a196-4696-a599-70a6386cb89b-machine-approver-tls\") pod \"machine-approver-56656f9798-7ch79\" (UID: \"bfeb748d-a196-4696-a599-70a6386cb89b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7ch79" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.215412 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4fe5a2bb-7e2b-4cf4-bb1b-a0be5c9027e3-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-kgc8n\" (UID: \"4fe5a2bb-7e2b-4cf4-bb1b-a0be5c9027e3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgc8n" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.215431 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-48bgk\" (UniqueName: \"kubernetes.io/projected/48280256-4ec4-4530-a2f1-8bee7c6b4871-kube-api-access-48bgk\") pod \"apiserver-7bbb656c7d-ngbkz\" (UID: \"48280256-4ec4-4530-a2f1-8bee7c6b4871\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.215468 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6567f364-4ab2-489b-837f-1e7c194f311d-serving-cert\") pod \"controller-manager-879f6c89f-qcdrc\" (UID: \"6567f364-4ab2-489b-837f-1e7c194f311d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-qcdrc" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.215486 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/bfeb748d-a196-4696-a599-70a6386cb89b-auth-proxy-config\") pod \"machine-approver-56656f9798-7ch79\" (UID: \"bfeb748d-a196-4696-a599-70a6386cb89b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7ch79" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.215504 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/48280256-4ec4-4530-a2f1-8bee7c6b4871-audit-policies\") pod \"apiserver-7bbb656c7d-ngbkz\" (UID: \"48280256-4ec4-4530-a2f1-8bee7c6b4871\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.215522 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/4fe5a2bb-7e2b-4cf4-bb1b-a0be5c9027e3-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-kgc8n\" (UID: \"4fe5a2bb-7e2b-4cf4-bb1b-a0be5c9027e3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgc8n" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.215541 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cm8ds\" (UniqueName: \"kubernetes.io/projected/b5ed0932-7c88-4245-9403-8a0e72659f59-kube-api-access-cm8ds\") pod \"openshift-apiserver-operator-796bbdcf4f-svwnq\" (UID: \"b5ed0932-7c88-4245-9403-8a0e72659f59\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-svwnq" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.215558 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-x8q7r\" (UniqueName: \"kubernetes.io/projected/6567f364-4ab2-489b-837f-1e7c194f311d-kube-api-access-x8q7r\") pod \"controller-manager-879f6c89f-qcdrc\" (UID: \"6567f364-4ab2-489b-837f-1e7c194f311d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-qcdrc" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.215604 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b5ed0932-7c88-4245-9403-8a0e72659f59-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-svwnq\" (UID: \"b5ed0932-7c88-4245-9403-8a0e72659f59\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-svwnq" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.215624 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bfeb748d-a196-4696-a599-70a6386cb89b-config\") pod \"machine-approver-56656f9798-7ch79\" (UID: \"bfeb748d-a196-4696-a599-70a6386cb89b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7ch79" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.215658 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/48280256-4ec4-4530-a2f1-8bee7c6b4871-encryption-config\") pod \"apiserver-7bbb656c7d-ngbkz\" (UID: \"48280256-4ec4-4530-a2f1-8bee7c6b4871\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.217651 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-m4qpv"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.221267 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.222982 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.223327 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.223502 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.223586 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-cxfwv"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.223738 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.223985 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.223997 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-m4qpv" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.223982 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.224463 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.225377 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8gchm"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.225798 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-rtnpd"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.226105 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-4vp8z"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.226638 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-cxfwv" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.226743 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-zrtpr"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.227199 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9tcn7"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.227645 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-rtnpd" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.227687 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.227893 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-4vp8z" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.227962 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8gchm" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.228338 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.228677 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9tcn7" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.228714 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zrtpr" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.228958 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-lwkxb"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.229857 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l5kdh"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.229902 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-lwkxb" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.231963 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29426790-559jq"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.232666 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l5kdh" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.233106 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.233623 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29426790-559jq" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.233946 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9qv2z"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.236966 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9qv2z" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.238726 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.240741 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.259326 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.263682 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r8m8l"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.264290 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.270280 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r8m8l" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.270324 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-895rs"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.271894 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-28jpz"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.272125 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-895rs" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.276757 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.278105 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.289467 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.290727 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-28jpz" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.313455 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-cwvrg"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.313678 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.315324 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rd7ph"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.316051 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-xkmc7"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.316302 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-cwvrg" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.316651 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-svwnq"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.316733 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-xkmc7" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.316907 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rd7ph" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.318428 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-89rwn\" (UniqueName: \"kubernetes.io/projected/f567d62b-7941-466c-84c8-06f6854000ba-kube-api-access-89rwn\") pod \"downloads-7954f5f757-xclb7\" (UID: \"f567d62b-7941-466c-84c8-06f6854000ba\") " pod="openshift-console/downloads-7954f5f757-xclb7" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.318507 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b9c10fa1-af81-4b42-9c40-55c4a7aa6703-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-g9bxc\" (UID: \"b9c10fa1-af81-4b42-9c40-55c4a7aa6703\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-g9bxc" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.318575 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1de0ebfd-b283-4790-badb-fb78d80e6703-trusted-ca-bundle\") pod \"console-f9d7485db-5nt4n\" (UID: \"1de0ebfd-b283-4790-badb-fb78d80e6703\") " pod="openshift-console/console-f9d7485db-5nt4n" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.318615 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.318662 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b9c10fa1-af81-4b42-9c40-55c4a7aa6703-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-g9bxc\" (UID: \"b9c10fa1-af81-4b42-9c40-55c4a7aa6703\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-g9bxc" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.318701 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/058c0262-48b0-4176-9f52-20de42c46477-etcd-client\") pod \"apiserver-76f77b778f-hxskn\" (UID: \"058c0262-48b0-4176-9f52-20de42c46477\") " pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.318732 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dklj9\" (UniqueName: \"kubernetes.io/projected/bfeb748d-a196-4696-a599-70a6386cb89b-kube-api-access-dklj9\") pod \"machine-approver-56656f9798-7ch79\" (UID: \"bfeb748d-a196-4696-a599-70a6386cb89b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7ch79" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.318770 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/48280256-4ec4-4530-a2f1-8bee7c6b4871-serving-cert\") pod \"apiserver-7bbb656c7d-ngbkz\" (UID: \"48280256-4ec4-4530-a2f1-8bee7c6b4871\") " 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.318801 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/81dcc1e9-b7ab-4d9e-8a4a-6bdc087877a4-images\") pod \"machine-api-operator-5694c8668f-x5sc8\" (UID: \"81dcc1e9-b7ab-4d9e-8a4a-6bdc087877a4\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x5sc8" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.318825 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/058c0262-48b0-4176-9f52-20de42c46477-image-import-ca\") pod \"apiserver-76f77b778f-hxskn\" (UID: \"058c0262-48b0-4176-9f52-20de42c46477\") " pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.318887 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5ed0932-7c88-4245-9403-8a0e72659f59-config\") pod \"openshift-apiserver-operator-796bbdcf4f-svwnq\" (UID: \"b5ed0932-7c88-4245-9403-8a0e72659f59\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-svwnq" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.319029 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/bfeb748d-a196-4696-a599-70a6386cb89b-machine-approver-tls\") pod \"machine-approver-56656f9798-7ch79\" (UID: \"bfeb748d-a196-4696-a599-70a6386cb89b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7ch79" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.319064 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5a408a68-6f27-4655-a067-3d2b08ad5a7d-serving-cert\") pod \"openshift-config-operator-7777fb866f-vtxwp\" (UID: \"5a408a68-6f27-4655-a067-3d2b08ad5a7d\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-vtxwp" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.319212 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/058c0262-48b0-4176-9f52-20de42c46477-node-pullsecrets\") pod \"apiserver-76f77b778f-hxskn\" (UID: \"058c0262-48b0-4176-9f52-20de42c46477\") " pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.319502 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/81dcc1e9-b7ab-4d9e-8a4a-6bdc087877a4-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-x5sc8\" (UID: \"81dcc1e9-b7ab-4d9e-8a4a-6bdc087877a4\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x5sc8" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.320107 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/058c0262-48b0-4176-9f52-20de42c46477-serving-cert\") pod \"apiserver-76f77b778f-hxskn\" (UID: \"058c0262-48b0-4176-9f52-20de42c46477\") " pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.320215 5048 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/aa6cce4c-6bc2-469b-9062-e928744616db-audit-dir\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.320259 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4fe5a2bb-7e2b-4cf4-bb1b-a0be5c9027e3-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-kgc8n\" (UID: \"4fe5a2bb-7e2b-4cf4-bb1b-a0be5c9027e3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgc8n" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.320288 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-48bgk\" (UniqueName: \"kubernetes.io/projected/48280256-4ec4-4530-a2f1-8bee7c6b4871-kube-api-access-48bgk\") pod \"apiserver-7bbb656c7d-ngbkz\" (UID: \"48280256-4ec4-4530-a2f1-8bee7c6b4871\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.320612 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4n84d\" (UniqueName: \"kubernetes.io/projected/5a408a68-6f27-4655-a067-3d2b08ad5a7d-kube-api-access-4n84d\") pod \"openshift-config-operator-7777fb866f-vtxwp\" (UID: \"5a408a68-6f27-4655-a067-3d2b08ad5a7d\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-vtxwp" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.320833 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6567f364-4ab2-489b-837f-1e7c194f311d-serving-cert\") pod \"controller-manager-879f6c89f-qcdrc\" (UID: \"6567f364-4ab2-489b-837f-1e7c194f311d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-qcdrc" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.320905 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1de0ebfd-b283-4790-badb-fb78d80e6703-console-config\") pod \"console-f9d7485db-5nt4n\" (UID: \"1de0ebfd-b283-4790-badb-fb78d80e6703\") " pod="openshift-console/console-f9d7485db-5nt4n" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.320930 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1de0ebfd-b283-4790-badb-fb78d80e6703-service-ca\") pod \"console-f9d7485db-5nt4n\" (UID: \"1de0ebfd-b283-4790-badb-fb78d80e6703\") " pod="openshift-console/console-f9d7485db-5nt4n" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.320957 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/058c0262-48b0-4176-9f52-20de42c46477-audit-dir\") pod \"apiserver-76f77b778f-hxskn\" (UID: \"058c0262-48b0-4176-9f52-20de42c46477\") " pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.321007 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/bfeb748d-a196-4696-a599-70a6386cb89b-auth-proxy-config\") pod 
\"machine-approver-56656f9798-7ch79\" (UID: \"bfeb748d-a196-4696-a599-70a6386cb89b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7ch79" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.321035 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/058c0262-48b0-4176-9f52-20de42c46477-encryption-config\") pod \"apiserver-76f77b778f-hxskn\" (UID: \"058c0262-48b0-4176-9f52-20de42c46477\") " pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.321087 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f047ff9-f271-4f18-935e-f811afd75852-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-75h2c\" (UID: \"3f047ff9-f271-4f18-935e-f811afd75852\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-75h2c" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.321139 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/4fe5a2bb-7e2b-4cf4-bb1b-a0be5c9027e3-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-kgc8n\" (UID: \"4fe5a2bb-7e2b-4cf4-bb1b-a0be5c9027e3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgc8n" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.321164 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/48280256-4ec4-4530-a2f1-8bee7c6b4871-audit-policies\") pod \"apiserver-7bbb656c7d-ngbkz\" (UID: \"48280256-4ec4-4530-a2f1-8bee7c6b4871\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.321190 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cm8ds\" (UniqueName: \"kubernetes.io/projected/b5ed0932-7c88-4245-9403-8a0e72659f59-kube-api-access-cm8ds\") pod \"openshift-apiserver-operator-796bbdcf4f-svwnq\" (UID: \"b5ed0932-7c88-4245-9403-8a0e72659f59\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-svwnq" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.321231 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x8q7r\" (UniqueName: \"kubernetes.io/projected/6567f364-4ab2-489b-837f-1e7c194f311d-kube-api-access-x8q7r\") pod \"controller-manager-879f6c89f-qcdrc\" (UID: \"6567f364-4ab2-489b-837f-1e7c194f311d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-qcdrc" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.321252 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/058c0262-48b0-4176-9f52-20de42c46477-etcd-serving-ca\") pod \"apiserver-76f77b778f-hxskn\" (UID: \"058c0262-48b0-4176-9f52-20de42c46477\") " pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.321273 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bfeb748d-a196-4696-a599-70a6386cb89b-config\") pod \"machine-approver-56656f9798-7ch79\" (UID: \"bfeb748d-a196-4696-a599-70a6386cb89b\") " 
pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7ch79" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.321313 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/48280256-4ec4-4530-a2f1-8bee7c6b4871-encryption-config\") pod \"apiserver-7bbb656c7d-ngbkz\" (UID: \"48280256-4ec4-4530-a2f1-8bee7c6b4871\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.321332 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b5ed0932-7c88-4245-9403-8a0e72659f59-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-svwnq\" (UID: \"b5ed0932-7c88-4245-9403-8a0e72659f59\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-svwnq" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.321354 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3f047ff9-f271-4f18-935e-f811afd75852-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-75h2c\" (UID: \"3f047ff9-f271-4f18-935e-f811afd75852\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-75h2c" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.321392 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/aa6cce4c-6bc2-469b-9062-e928744616db-audit-policies\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.321414 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.321471 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.321507 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/48280256-4ec4-4530-a2f1-8bee7c6b4871-audit-dir\") pod \"apiserver-7bbb656c7d-ngbkz\" (UID: \"48280256-4ec4-4530-a2f1-8bee7c6b4871\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.321556 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/48280256-4ec4-4530-a2f1-8bee7c6b4871-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-ngbkz\" (UID: \"48280256-4ec4-4530-a2f1-8bee7c6b4871\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz" Dec 
13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.321579 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-psv92\" (UniqueName: \"kubernetes.io/projected/81dcc1e9-b7ab-4d9e-8a4a-6bdc087877a4-kube-api-access-psv92\") pod \"machine-api-operator-5694c8668f-x5sc8\" (UID: \"81dcc1e9-b7ab-4d9e-8a4a-6bdc087877a4\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x5sc8" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.321619 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.321644 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.321668 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.321712 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/058c0262-48b0-4176-9f52-20de42c46477-audit\") pod \"apiserver-76f77b778f-hxskn\" (UID: \"058c0262-48b0-4176-9f52-20de42c46477\") " pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.321733 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ab459e43-1b40-481c-901c-20344fb51434-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-7pgnt\" (UID: \"ab459e43-1b40-481c-901c-20344fb51434\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-7pgnt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.321754 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-svbm2\" (UniqueName: \"kubernetes.io/projected/ab459e43-1b40-481c-901c-20344fb51434-kube-api-access-svbm2\") pod \"machine-config-controller-84d6567774-7pgnt\" (UID: \"ab459e43-1b40-481c-901c-20344fb51434\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-7pgnt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.321807 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/058c0262-48b0-4176-9f52-20de42c46477-config\") pod \"apiserver-76f77b778f-hxskn\" (UID: \"058c0262-48b0-4176-9f52-20de42c46477\") " 
pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.321893 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1de0ebfd-b283-4790-badb-fb78d80e6703-console-oauth-config\") pod \"console-f9d7485db-5nt4n\" (UID: \"1de0ebfd-b283-4790-badb-fb78d80e6703\") " pod="openshift-console/console-f9d7485db-5nt4n" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.321919 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1de0ebfd-b283-4790-badb-fb78d80e6703-oauth-serving-cert\") pod \"console-f9d7485db-5nt4n\" (UID: \"1de0ebfd-b283-4790-badb-fb78d80e6703\") " pod="openshift-console/console-f9d7485db-5nt4n" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.321968 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zn766\" (UniqueName: \"kubernetes.io/projected/1de0ebfd-b283-4790-badb-fb78d80e6703-kube-api-access-zn766\") pod \"console-f9d7485db-5nt4n\" (UID: \"1de0ebfd-b283-4790-badb-fb78d80e6703\") " pod="openshift-console/console-f9d7485db-5nt4n" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.321985 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3f047ff9-f271-4f18-935e-f811afd75852-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-75h2c\" (UID: \"3f047ff9-f271-4f18-935e-f811afd75852\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-75h2c" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.322010 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/5a408a68-6f27-4655-a067-3d2b08ad5a7d-available-featuregates\") pod \"openshift-config-operator-7777fb866f-vtxwp\" (UID: \"5a408a68-6f27-4655-a067-3d2b08ad5a7d\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-vtxwp" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.322053 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/ab459e43-1b40-481c-901c-20344fb51434-proxy-tls\") pod \"machine-config-controller-84d6567774-7pgnt\" (UID: \"ab459e43-1b40-481c-901c-20344fb51434\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-7pgnt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.322075 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.322123 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rf5gj\" (UniqueName: \"kubernetes.io/projected/4fe5a2bb-7e2b-4cf4-bb1b-a0be5c9027e3-kube-api-access-rf5gj\") pod \"cluster-image-registry-operator-dc59b4c8b-kgc8n\" (UID: \"4fe5a2bb-7e2b-4cf4-bb1b-a0be5c9027e3\") " 
pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgc8n" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.322143 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81dcc1e9-b7ab-4d9e-8a4a-6bdc087877a4-config\") pod \"machine-api-operator-5694c8668f-x5sc8\" (UID: \"81dcc1e9-b7ab-4d9e-8a4a-6bdc087877a4\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x5sc8" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.322162 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/058c0262-48b0-4176-9f52-20de42c46477-trusted-ca-bundle\") pod \"apiserver-76f77b778f-hxskn\" (UID: \"058c0262-48b0-4176-9f52-20de42c46477\") " pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.322215 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6567f364-4ab2-489b-837f-1e7c194f311d-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-qcdrc\" (UID: \"6567f364-4ab2-489b-837f-1e7c194f311d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-qcdrc" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.322237 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/48280256-4ec4-4530-a2f1-8bee7c6b4871-etcd-client\") pod \"apiserver-7bbb656c7d-ngbkz\" (UID: \"48280256-4ec4-4530-a2f1-8bee7c6b4871\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.322275 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.322302 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pw47j\" (UniqueName: \"kubernetes.io/projected/aa6cce4c-6bc2-469b-9062-e928744616db-kube-api-access-pw47j\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.322330 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6567f364-4ab2-489b-837f-1e7c194f311d-client-ca\") pod \"controller-manager-879f6c89f-qcdrc\" (UID: \"6567f364-4ab2-489b-837f-1e7c194f311d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-qcdrc" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.322371 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sz5vp\" (UniqueName: \"kubernetes.io/projected/058c0262-48b0-4176-9f52-20de42c46477-kube-api-access-sz5vp\") pod \"apiserver-76f77b778f-hxskn\" (UID: \"058c0262-48b0-4176-9f52-20de42c46477\") " pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.322402 5048 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6567f364-4ab2-489b-837f-1e7c194f311d-config\") pod \"controller-manager-879f6c89f-qcdrc\" (UID: \"6567f364-4ab2-489b-837f-1e7c194f311d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-qcdrc" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.322454 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.322477 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.322521 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1de0ebfd-b283-4790-badb-fb78d80e6703-console-serving-cert\") pod \"console-f9d7485db-5nt4n\" (UID: \"1de0ebfd-b283-4790-badb-fb78d80e6703\") " pod="openshift-console/console-f9d7485db-5nt4n" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.322547 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/4fe5a2bb-7e2b-4cf4-bb1b-a0be5c9027e3-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-kgc8n\" (UID: \"4fe5a2bb-7e2b-4cf4-bb1b-a0be5c9027e3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgc8n" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.322568 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/48280256-4ec4-4530-a2f1-8bee7c6b4871-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-ngbkz\" (UID: \"48280256-4ec4-4530-a2f1-8bee7c6b4871\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.322611 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/8b20de69-950c-42d1-b967-d0fc59d035cd-metrics-tls\") pod \"dns-operator-744455d44c-x4k67\" (UID: \"8b20de69-950c-42d1-b967-d0fc59d035cd\") " pod="openshift-dns-operator/dns-operator-744455d44c-x4k67" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.322633 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zmmkw\" (UniqueName: \"kubernetes.io/projected/8b20de69-950c-42d1-b967-d0fc59d035cd-kube-api-access-zmmkw\") pod \"dns-operator-744455d44c-x4k67\" (UID: \"8b20de69-950c-42d1-b967-d0fc59d035cd\") " pod="openshift-dns-operator/dns-operator-744455d44c-x4k67" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.322653 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config\" (UniqueName: \"kubernetes.io/configmap/b9c10fa1-af81-4b42-9c40-55c4a7aa6703-config\") pod \"kube-apiserver-operator-766d6c64bb-g9bxc\" (UID: \"b9c10fa1-af81-4b42-9c40-55c4a7aa6703\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-g9bxc" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.322692 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.321979 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4fe5a2bb-7e2b-4cf4-bb1b-a0be5c9027e3-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-kgc8n\" (UID: \"4fe5a2bb-7e2b-4cf4-bb1b-a0be5c9027e3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgc8n" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.324174 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5ed0932-7c88-4245-9403-8a0e72659f59-config\") pod \"openshift-apiserver-operator-796bbdcf4f-svwnq\" (UID: \"b5ed0932-7c88-4245-9403-8a0e72659f59\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-svwnq" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.325898 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6567f364-4ab2-489b-837f-1e7c194f311d-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-qcdrc\" (UID: \"6567f364-4ab2-489b-837f-1e7c194f311d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-qcdrc" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.327638 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6567f364-4ab2-489b-837f-1e7c194f311d-client-ca\") pod \"controller-manager-879f6c89f-qcdrc\" (UID: \"6567f364-4ab2-489b-837f-1e7c194f311d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-qcdrc" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.328287 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/48280256-4ec4-4530-a2f1-8bee7c6b4871-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-ngbkz\" (UID: \"48280256-4ec4-4530-a2f1-8bee7c6b4871\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.328771 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6567f364-4ab2-489b-837f-1e7c194f311d-config\") pod \"controller-manager-879f6c89f-qcdrc\" (UID: \"6567f364-4ab2-489b-837f-1e7c194f311d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-qcdrc" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.328856 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/48280256-4ec4-4530-a2f1-8bee7c6b4871-audit-dir\") pod \"apiserver-7bbb656c7d-ngbkz\" (UID: \"48280256-4ec4-4530-a2f1-8bee7c6b4871\") " 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.329495 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.339863 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-5nt4n"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.339924 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgc8n"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.341150 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/48280256-4ec4-4530-a2f1-8bee7c6b4871-audit-policies\") pod \"apiserver-7bbb656c7d-ngbkz\" (UID: \"48280256-4ec4-4530-a2f1-8bee7c6b4871\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.341755 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/48280256-4ec4-4530-a2f1-8bee7c6b4871-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-ngbkz\" (UID: \"48280256-4ec4-4530-a2f1-8bee7c6b4871\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.345898 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/bfeb748d-a196-4696-a599-70a6386cb89b-auth-proxy-config\") pod \"machine-approver-56656f9798-7ch79\" (UID: \"bfeb748d-a196-4696-a599-70a6386cb89b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7ch79" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.346338 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bfeb748d-a196-4696-a599-70a6386cb89b-config\") pod \"machine-approver-56656f9798-7ch79\" (UID: \"bfeb748d-a196-4696-a599-70a6386cb89b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7ch79" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.350815 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6567f364-4ab2-489b-837f-1e7c194f311d-serving-cert\") pod \"controller-manager-879f6c89f-qcdrc\" (UID: \"6567f364-4ab2-489b-837f-1e7c194f311d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-qcdrc" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.353228 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/48280256-4ec4-4530-a2f1-8bee7c6b4871-etcd-client\") pod \"apiserver-7bbb656c7d-ngbkz\" (UID: \"48280256-4ec4-4530-a2f1-8bee7c6b4871\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.353754 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.353930 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/48280256-4ec4-4530-a2f1-8bee7c6b4871-serving-cert\") pod \"apiserver-7bbb656c7d-ngbkz\" (UID: \"48280256-4ec4-4530-a2f1-8bee7c6b4871\") " 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.355648 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-vtxwp"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.357048 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-7w8rw"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.359480 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/bfeb748d-a196-4696-a599-70a6386cb89b-machine-approver-tls\") pod \"machine-approver-56656f9798-7ch79\" (UID: \"bfeb748d-a196-4696-a599-70a6386cb89b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7ch79" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.359518 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-x4k67"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.359574 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-6p4wm"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.360332 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-6p4wm" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.362871 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-7pgnt"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.363567 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b5ed0932-7c88-4245-9403-8a0e72659f59-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-svwnq\" (UID: \"b5ed0932-7c88-4245-9403-8a0e72659f59\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-svwnq" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.364508 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-x5sc8"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.364704 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/48280256-4ec4-4530-a2f1-8bee7c6b4871-encryption-config\") pod \"apiserver-7bbb656c7d-ngbkz\" (UID: \"48280256-4ec4-4530-a2f1-8bee7c6b4871\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.364994 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-xclb7"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.368523 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-qcdrc"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.369490 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5qgj6"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.370053 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.375316 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-zrtpr"] Dec 13 06:31:47 crc kubenswrapper[5048]: 
I1213 06:31:47.375383 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l5kdh"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.375398 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-cxfwv"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.377416 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-75h2c"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.378744 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/4fe5a2bb-7e2b-4cf4-bb1b-a0be5c9027e3-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-kgc8n\" (UID: \"4fe5a2bb-7e2b-4cf4-bb1b-a0be5c9027e3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgc8n" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.379660 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-4vp8z"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.380566 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9tcn7"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.381615 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-m4qpv"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.382698 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-dwk4m"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.384151 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-hxskn"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.384258 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.385240 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-t2n95"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.386332 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-w5fph"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.387554 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-lzrgn"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.388933 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-7z7xz"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.392516 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8gchm"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.393922 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9qv2z"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.394971 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-k7pxh"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.396636 
5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-k7pxh" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.396989 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-6zbpx"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.399036 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-6zbpx" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.399346 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-g9bxc"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.400861 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-6p4wm"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.404101 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.406103 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-lwkxb"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.406159 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r8m8l"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.408338 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29426790-559jq"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.409635 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-28jpz"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.411734 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rd7ph"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.415910 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-6zbpx"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.418773 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-k7pxh"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.425173 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/81dcc1e9-b7ab-4d9e-8a4a-6bdc087877a4-images\") pod \"machine-api-operator-5694c8668f-x5sc8\" (UID: \"81dcc1e9-b7ab-4d9e-8a4a-6bdc087877a4\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x5sc8" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.425238 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/058c0262-48b0-4176-9f52-20de42c46477-image-import-ca\") pod \"apiserver-76f77b778f-hxskn\" (UID: \"058c0262-48b0-4176-9f52-20de42c46477\") " pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.425284 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5a408a68-6f27-4655-a067-3d2b08ad5a7d-serving-cert\") pod \"openshift-config-operator-7777fb866f-vtxwp\" (UID: \"5a408a68-6f27-4655-a067-3d2b08ad5a7d\") " 
pod="openshift-config-operator/openshift-config-operator-7777fb866f-vtxwp" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.425320 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/058c0262-48b0-4176-9f52-20de42c46477-node-pullsecrets\") pod \"apiserver-76f77b778f-hxskn\" (UID: \"058c0262-48b0-4176-9f52-20de42c46477\") " pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.425359 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/81dcc1e9-b7ab-4d9e-8a4a-6bdc087877a4-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-x5sc8\" (UID: \"81dcc1e9-b7ab-4d9e-8a4a-6bdc087877a4\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x5sc8" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.425392 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/058c0262-48b0-4176-9f52-20de42c46477-serving-cert\") pod \"apiserver-76f77b778f-hxskn\" (UID: \"058c0262-48b0-4176-9f52-20de42c46477\") " pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.425415 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/aa6cce4c-6bc2-469b-9062-e928744616db-audit-dir\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.425485 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4n84d\" (UniqueName: \"kubernetes.io/projected/5a408a68-6f27-4655-a067-3d2b08ad5a7d-kube-api-access-4n84d\") pod \"openshift-config-operator-7777fb866f-vtxwp\" (UID: \"5a408a68-6f27-4655-a067-3d2b08ad5a7d\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-vtxwp" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.425527 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1de0ebfd-b283-4790-badb-fb78d80e6703-console-config\") pod \"console-f9d7485db-5nt4n\" (UID: \"1de0ebfd-b283-4790-badb-fb78d80e6703\") " pod="openshift-console/console-f9d7485db-5nt4n" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.425551 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1de0ebfd-b283-4790-badb-fb78d80e6703-service-ca\") pod \"console-f9d7485db-5nt4n\" (UID: \"1de0ebfd-b283-4790-badb-fb78d80e6703\") " pod="openshift-console/console-f9d7485db-5nt4n" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.425584 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/058c0262-48b0-4176-9f52-20de42c46477-audit-dir\") pod \"apiserver-76f77b778f-hxskn\" (UID: \"058c0262-48b0-4176-9f52-20de42c46477\") " pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.425616 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: 
\"kubernetes.io/secret/058c0262-48b0-4176-9f52-20de42c46477-encryption-config\") pod \"apiserver-76f77b778f-hxskn\" (UID: \"058c0262-48b0-4176-9f52-20de42c46477\") " pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.425647 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f047ff9-f271-4f18-935e-f811afd75852-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-75h2c\" (UID: \"3f047ff9-f271-4f18-935e-f811afd75852\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-75h2c" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.425718 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/058c0262-48b0-4176-9f52-20de42c46477-etcd-serving-ca\") pod \"apiserver-76f77b778f-hxskn\" (UID: \"058c0262-48b0-4176-9f52-20de42c46477\") " pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.425747 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3f047ff9-f271-4f18-935e-f811afd75852-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-75h2c\" (UID: \"3f047ff9-f271-4f18-935e-f811afd75852\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-75h2c" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.425770 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/aa6cce4c-6bc2-469b-9062-e928744616db-audit-policies\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.425794 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.425836 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.425876 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.431490 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-router-certs\") pod 
\"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.431534 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-psv92\" (UniqueName: \"kubernetes.io/projected/81dcc1e9-b7ab-4d9e-8a4a-6bdc087877a4-kube-api-access-psv92\") pod \"machine-api-operator-5694c8668f-x5sc8\" (UID: \"81dcc1e9-b7ab-4d9e-8a4a-6bdc087877a4\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x5sc8" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.431548 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/058c0262-48b0-4176-9f52-20de42c46477-image-import-ca\") pod \"apiserver-76f77b778f-hxskn\" (UID: \"058c0262-48b0-4176-9f52-20de42c46477\") " pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.431562 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.431660 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-svbm2\" (UniqueName: \"kubernetes.io/projected/ab459e43-1b40-481c-901c-20344fb51434-kube-api-access-svbm2\") pod \"machine-config-controller-84d6567774-7pgnt\" (UID: \"ab459e43-1b40-481c-901c-20344fb51434\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-7pgnt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.431718 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/058c0262-48b0-4176-9f52-20de42c46477-audit\") pod \"apiserver-76f77b778f-hxskn\" (UID: \"058c0262-48b0-4176-9f52-20de42c46477\") " pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.431741 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ab459e43-1b40-481c-901c-20344fb51434-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-7pgnt\" (UID: \"ab459e43-1b40-481c-901c-20344fb51434\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-7pgnt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.431768 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/058c0262-48b0-4176-9f52-20de42c46477-config\") pod \"apiserver-76f77b778f-hxskn\" (UID: \"058c0262-48b0-4176-9f52-20de42c46477\") " pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.431789 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1de0ebfd-b283-4790-badb-fb78d80e6703-console-oauth-config\") pod \"console-f9d7485db-5nt4n\" (UID: \"1de0ebfd-b283-4790-badb-fb78d80e6703\") " pod="openshift-console/console-f9d7485db-5nt4n" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 
06:31:47.431811 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1de0ebfd-b283-4790-badb-fb78d80e6703-oauth-serving-cert\") pod \"console-f9d7485db-5nt4n\" (UID: \"1de0ebfd-b283-4790-badb-fb78d80e6703\") " pod="openshift-console/console-f9d7485db-5nt4n" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.431833 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zn766\" (UniqueName: \"kubernetes.io/projected/1de0ebfd-b283-4790-badb-fb78d80e6703-kube-api-access-zn766\") pod \"console-f9d7485db-5nt4n\" (UID: \"1de0ebfd-b283-4790-badb-fb78d80e6703\") " pod="openshift-console/console-f9d7485db-5nt4n" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.431857 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3f047ff9-f271-4f18-935e-f811afd75852-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-75h2c\" (UID: \"3f047ff9-f271-4f18-935e-f811afd75852\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-75h2c" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.431880 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/ab459e43-1b40-481c-901c-20344fb51434-proxy-tls\") pod \"machine-config-controller-84d6567774-7pgnt\" (UID: \"ab459e43-1b40-481c-901c-20344fb51434\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-7pgnt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.431901 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.431934 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/5a408a68-6f27-4655-a067-3d2b08ad5a7d-available-featuregates\") pod \"openshift-config-operator-7777fb866f-vtxwp\" (UID: \"5a408a68-6f27-4655-a067-3d2b08ad5a7d\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-vtxwp" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.431951 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/058c0262-48b0-4176-9f52-20de42c46477-trusted-ca-bundle\") pod \"apiserver-76f77b778f-hxskn\" (UID: \"058c0262-48b0-4176-9f52-20de42c46477\") " pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.432009 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81dcc1e9-b7ab-4d9e-8a4a-6bdc087877a4-config\") pod \"machine-api-operator-5694c8668f-x5sc8\" (UID: \"81dcc1e9-b7ab-4d9e-8a4a-6bdc087877a4\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x5sc8" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.432062 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pw47j\" (UniqueName: 
\"kubernetes.io/projected/aa6cce4c-6bc2-469b-9062-e928744616db-kube-api-access-pw47j\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.432095 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.432160 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sz5vp\" (UniqueName: \"kubernetes.io/projected/058c0262-48b0-4176-9f52-20de42c46477-kube-api-access-sz5vp\") pod \"apiserver-76f77b778f-hxskn\" (UID: \"058c0262-48b0-4176-9f52-20de42c46477\") " pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.432192 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.432242 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.432269 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1de0ebfd-b283-4790-badb-fb78d80e6703-console-serving-cert\") pod \"console-f9d7485db-5nt4n\" (UID: \"1de0ebfd-b283-4790-badb-fb78d80e6703\") " pod="openshift-console/console-f9d7485db-5nt4n" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.432393 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.432420 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/8b20de69-950c-42d1-b967-d0fc59d035cd-metrics-tls\") pod \"dns-operator-744455d44c-x4k67\" (UID: \"8b20de69-950c-42d1-b967-d0fc59d035cd\") " pod="openshift-dns-operator/dns-operator-744455d44c-x4k67" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.432551 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zmmkw\" (UniqueName: \"kubernetes.io/projected/8b20de69-950c-42d1-b967-d0fc59d035cd-kube-api-access-zmmkw\") pod \"dns-operator-744455d44c-x4k67\" (UID: \"8b20de69-950c-42d1-b967-d0fc59d035cd\") " 
pod="openshift-dns-operator/dns-operator-744455d44c-x4k67" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.432572 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b9c10fa1-af81-4b42-9c40-55c4a7aa6703-config\") pod \"kube-apiserver-operator-766d6c64bb-g9bxc\" (UID: \"b9c10fa1-af81-4b42-9c40-55c4a7aa6703\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-g9bxc" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.432741 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-89rwn\" (UniqueName: \"kubernetes.io/projected/f567d62b-7941-466c-84c8-06f6854000ba-kube-api-access-89rwn\") pod \"downloads-7954f5f757-xclb7\" (UID: \"f567d62b-7941-466c-84c8-06f6854000ba\") " pod="openshift-console/downloads-7954f5f757-xclb7" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.432727 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/aa6cce4c-6bc2-469b-9062-e928744616db-audit-policies\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.432884 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b9c10fa1-af81-4b42-9c40-55c4a7aa6703-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-g9bxc\" (UID: \"b9c10fa1-af81-4b42-9c40-55c4a7aa6703\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-g9bxc" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.433040 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1de0ebfd-b283-4790-badb-fb78d80e6703-trusted-ca-bundle\") pod \"console-f9d7485db-5nt4n\" (UID: \"1de0ebfd-b283-4790-badb-fb78d80e6703\") " pod="openshift-console/console-f9d7485db-5nt4n" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.433195 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.433252 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b9c10fa1-af81-4b42-9c40-55c4a7aa6703-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-g9bxc\" (UID: \"b9c10fa1-af81-4b42-9c40-55c4a7aa6703\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-g9bxc" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.433386 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/058c0262-48b0-4176-9f52-20de42c46477-etcd-client\") pod \"apiserver-76f77b778f-hxskn\" (UID: \"058c0262-48b0-4176-9f52-20de42c46477\") " pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.433193 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: 
\"kubernetes.io/configmap/1de0ebfd-b283-4790-badb-fb78d80e6703-service-ca\") pod \"console-f9d7485db-5nt4n\" (UID: \"1de0ebfd-b283-4790-badb-fb78d80e6703\") " pod="openshift-console/console-f9d7485db-5nt4n" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.434051 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/81dcc1e9-b7ab-4d9e-8a4a-6bdc087877a4-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-x5sc8\" (UID: \"81dcc1e9-b7ab-4d9e-8a4a-6bdc087877a4\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x5sc8" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.434268 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/058c0262-48b0-4176-9f52-20de42c46477-node-pullsecrets\") pod \"apiserver-76f77b778f-hxskn\" (UID: \"058c0262-48b0-4176-9f52-20de42c46477\") " pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.434523 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/aa6cce4c-6bc2-469b-9062-e928744616db-audit-dir\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.434679 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/058c0262-48b0-4176-9f52-20de42c46477-etcd-serving-ca\") pod \"apiserver-76f77b778f-hxskn\" (UID: \"058c0262-48b0-4176-9f52-20de42c46477\") " pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.435418 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/058c0262-48b0-4176-9f52-20de42c46477-config\") pod \"apiserver-76f77b778f-hxskn\" (UID: \"058c0262-48b0-4176-9f52-20de42c46477\") " pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.435470 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/058c0262-48b0-4176-9f52-20de42c46477-audit\") pod \"apiserver-76f77b778f-hxskn\" (UID: \"058c0262-48b0-4176-9f52-20de42c46477\") " pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.435549 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.436464 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1de0ebfd-b283-4790-badb-fb78d80e6703-oauth-serving-cert\") pod \"console-f9d7485db-5nt4n\" (UID: \"1de0ebfd-b283-4790-badb-fb78d80e6703\") " pod="openshift-console/console-f9d7485db-5nt4n" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.436658 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ab459e43-1b40-481c-901c-20344fb51434-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-7pgnt\" (UID: \"ab459e43-1b40-481c-901c-20344fb51434\") " 
pod="openshift-machine-config-operator/machine-config-controller-84d6567774-7pgnt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.436743 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5a408a68-6f27-4655-a067-3d2b08ad5a7d-serving-cert\") pod \"openshift-config-operator-7777fb866f-vtxwp\" (UID: \"5a408a68-6f27-4655-a067-3d2b08ad5a7d\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-vtxwp" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.437473 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/81dcc1e9-b7ab-4d9e-8a4a-6bdc087877a4-images\") pod \"machine-api-operator-5694c8668f-x5sc8\" (UID: \"81dcc1e9-b7ab-4d9e-8a4a-6bdc087877a4\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x5sc8" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.437514 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-xkmc7"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.437663 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.438273 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.429467 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1de0ebfd-b283-4790-badb-fb78d80e6703-console-config\") pod \"console-f9d7485db-5nt4n\" (UID: \"1de0ebfd-b283-4790-badb-fb78d80e6703\") " pod="openshift-console/console-f9d7485db-5nt4n" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.440147 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.440432 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/5a408a68-6f27-4655-a067-3d2b08ad5a7d-available-featuregates\") pod \"openshift-config-operator-7777fb866f-vtxwp\" (UID: \"5a408a68-6f27-4655-a067-3d2b08ad5a7d\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-vtxwp" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.441295 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1de0ebfd-b283-4790-badb-fb78d80e6703-console-serving-cert\") pod \"console-f9d7485db-5nt4n\" (UID: \"1de0ebfd-b283-4790-badb-fb78d80e6703\") " 
pod="openshift-console/console-f9d7485db-5nt4n" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.441727 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.430375 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/058c0262-48b0-4176-9f52-20de42c46477-audit-dir\") pod \"apiserver-76f77b778f-hxskn\" (UID: \"058c0262-48b0-4176-9f52-20de42c46477\") " pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.442064 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.442382 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1de0ebfd-b283-4790-badb-fb78d80e6703-console-oauth-config\") pod \"console-f9d7485db-5nt4n\" (UID: \"1de0ebfd-b283-4790-badb-fb78d80e6703\") " pod="openshift-console/console-f9d7485db-5nt4n" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.442850 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-cwvrg"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.442901 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/058c0262-48b0-4176-9f52-20de42c46477-trusted-ca-bundle\") pod \"apiserver-76f77b778f-hxskn\" (UID: \"058c0262-48b0-4176-9f52-20de42c46477\") " pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.443185 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81dcc1e9-b7ab-4d9e-8a4a-6bdc087877a4-config\") pod \"machine-api-operator-5694c8668f-x5sc8\" (UID: \"81dcc1e9-b7ab-4d9e-8a4a-6bdc087877a4\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x5sc8" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.444717 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b9c10fa1-af81-4b42-9c40-55c4a7aa6703-config\") pod \"kube-apiserver-operator-766d6c64bb-g9bxc\" (UID: \"b9c10fa1-af81-4b42-9c40-55c4a7aa6703\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-g9bxc" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.445071 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f047ff9-f271-4f18-935e-f811afd75852-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-75h2c\" (UID: \"3f047ff9-f271-4f18-935e-f811afd75852\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-75h2c" Dec 13 06:31:47 crc 
kubenswrapper[5048]: I1213 06:31:47.446495 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/ab459e43-1b40-481c-901c-20344fb51434-proxy-tls\") pod \"machine-config-controller-84d6567774-7pgnt\" (UID: \"ab459e43-1b40-481c-901c-20344fb51434\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-7pgnt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.447782 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-895rs"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.447791 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.448503 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.448590 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3f047ff9-f271-4f18-935e-f811afd75852-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-75h2c\" (UID: \"3f047ff9-f271-4f18-935e-f811afd75852\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-75h2c" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.448657 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.448785 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/058c0262-48b0-4176-9f52-20de42c46477-etcd-client\") pod \"apiserver-76f77b778f-hxskn\" (UID: \"058c0262-48b0-4176-9f52-20de42c46477\") " pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.448555 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.452510 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b9c10fa1-af81-4b42-9c40-55c4a7aa6703-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-g9bxc\" (UID: \"b9c10fa1-af81-4b42-9c40-55c4a7aa6703\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-g9bxc" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.452362 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/1de0ebfd-b283-4790-badb-fb78d80e6703-trusted-ca-bundle\") pod \"console-f9d7485db-5nt4n\" (UID: \"1de0ebfd-b283-4790-badb-fb78d80e6703\") " pod="openshift-console/console-f9d7485db-5nt4n" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.452673 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.453033 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.453076 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.453474 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/058c0262-48b0-4176-9f52-20de42c46477-encryption-config\") pod \"apiserver-76f77b778f-hxskn\" (UID: \"058c0262-48b0-4176-9f52-20de42c46477\") " pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.453654 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/058c0262-48b0-4176-9f52-20de42c46477-serving-cert\") pod \"apiserver-76f77b778f-hxskn\" (UID: \"058c0262-48b0-4176-9f52-20de42c46477\") " pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.454689 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-gzp2z"] Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.455673 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-gzp2z" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.465302 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.505824 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.525374 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.544018 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.549228 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/8b20de69-950c-42d1-b967-d0fc59d035cd-metrics-tls\") pod \"dns-operator-744455d44c-x4k67\" (UID: \"8b20de69-950c-42d1-b967-d0fc59d035cd\") " pod="openshift-dns-operator/dns-operator-744455d44c-x4k67" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.564911 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.565914 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.605037 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.625260 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.644237 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.664939 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.684046 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.705499 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.724924 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.751299 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.764032 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.784708 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.804964 5048 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.825877 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.845975 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.866066 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.886134 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.904789 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.925302 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.944883 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.965422 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Dec 13 06:31:47 crc kubenswrapper[5048]: I1213 06:31:47.984708 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.005249 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.026347 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.046282 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.064956 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.085656 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.105312 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.126029 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.145393 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.165278 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 
06:31:48.185559 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.205845 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.224877 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.242897 5048 request.go:700] Waited for 1.013493039s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-operator-lifecycle-manager/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&limit=500&resourceVersion=0 Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.245814 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.271581 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.284603 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.306064 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.327076 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.344647 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.365567 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.385222 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.405232 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.425668 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.445835 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.465209 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.485039 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.505085 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Dec 
13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.525360 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.546250 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.564867 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.566037 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.566052 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.566189 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.585699 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.604411 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.625959 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.645931 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.665838 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.685093 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.706226 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.726054 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.744808 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.774218 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.785469 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.805294 5048 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-marketplace"/"kube-root-ca.crt" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.825045 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.864515 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dklj9\" (UniqueName: \"kubernetes.io/projected/bfeb748d-a196-4696-a599-70a6386cb89b-kube-api-access-dklj9\") pod \"machine-approver-56656f9798-7ch79\" (UID: \"bfeb748d-a196-4696-a599-70a6386cb89b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7ch79" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.882274 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-48bgk\" (UniqueName: \"kubernetes.io/projected/48280256-4ec4-4530-a2f1-8bee7c6b4871-kube-api-access-48bgk\") pod \"apiserver-7bbb656c7d-ngbkz\" (UID: \"48280256-4ec4-4530-a2f1-8bee7c6b4871\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.902364 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rf5gj\" (UniqueName: \"kubernetes.io/projected/4fe5a2bb-7e2b-4cf4-bb1b-a0be5c9027e3-kube-api-access-rf5gj\") pod \"cluster-image-registry-operator-dc59b4c8b-kgc8n\" (UID: \"4fe5a2bb-7e2b-4cf4-bb1b-a0be5c9027e3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgc8n" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.919388 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cm8ds\" (UniqueName: \"kubernetes.io/projected/b5ed0932-7c88-4245-9403-8a0e72659f59-kube-api-access-cm8ds\") pod \"openshift-apiserver-operator-796bbdcf4f-svwnq\" (UID: \"b5ed0932-7c88-4245-9403-8a0e72659f59\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-svwnq" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.939932 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-svwnq" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.952932 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/4fe5a2bb-7e2b-4cf4-bb1b-a0be5c9027e3-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-kgc8n\" (UID: \"4fe5a2bb-7e2b-4cf4-bb1b-a0be5c9027e3\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgc8n" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.962097 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x8q7r\" (UniqueName: \"kubernetes.io/projected/6567f364-4ab2-489b-837f-1e7c194f311d-kube-api-access-x8q7r\") pod \"controller-manager-879f6c89f-qcdrc\" (UID: \"6567f364-4ab2-489b-837f-1e7c194f311d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-qcdrc" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.964067 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.974813 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-qcdrc" Dec 13 06:31:48 crc kubenswrapper[5048]: I1213 06:31:48.984888 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.005049 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.012952 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7ch79" Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.025998 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Dec 13 06:31:49 crc kubenswrapper[5048]: W1213 06:31:49.034270 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbfeb748d_a196_4696_a599_70a6386cb89b.slice/crio-67ad5a2ec425122dee5180b44229a452f7240be8dae7576d7757400bc62287ee WatchSource:0}: Error finding container 67ad5a2ec425122dee5180b44229a452f7240be8dae7576d7757400bc62287ee: Status 404 returned error can't find the container with id 67ad5a2ec425122dee5180b44229a452f7240be8dae7576d7757400bc62287ee Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.087899 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.091886 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.091950 5048 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.105284 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.125763 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.146360 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.178379 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz" Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.185048 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3f047ff9-f271-4f18-935e-f811afd75852-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-75h2c\" (UID: \"3f047ff9-f271-4f18-935e-f811afd75852\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-75h2c" Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.194656 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-svwnq"] Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.202154 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4n84d\" (UniqueName: \"kubernetes.io/projected/5a408a68-6f27-4655-a067-3d2b08ad5a7d-kube-api-access-4n84d\") pod \"openshift-config-operator-7777fb866f-vtxwp\" (UID: \"5a408a68-6f27-4655-a067-3d2b08ad5a7d\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-vtxwp" Dec 13 06:31:49 crc kubenswrapper[5048]: W1213 06:31:49.212911 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb5ed0932_7c88_4245_9403_8a0e72659f59.slice/crio-8c9b5d47a98bad4c91dcf65adbd999cf4bd7be2ddbdf5ecf4797999a21e8f75b WatchSource:0}: Error finding container 8c9b5d47a98bad4c91dcf65adbd999cf4bd7be2ddbdf5ecf4797999a21e8f75b: Status 404 returned error can't find the container with id 8c9b5d47a98bad4c91dcf65adbd999cf4bd7be2ddbdf5ecf4797999a21e8f75b Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.216121 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgc8n" Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.225832 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-svbm2\" (UniqueName: \"kubernetes.io/projected/ab459e43-1b40-481c-901c-20344fb51434-kube-api-access-svbm2\") pod \"machine-config-controller-84d6567774-7pgnt\" (UID: \"ab459e43-1b40-481c-901c-20344fb51434\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-7pgnt" Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.236722 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-qcdrc"] Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.241372 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zn766\" (UniqueName: \"kubernetes.io/projected/1de0ebfd-b283-4790-badb-fb78d80e6703-kube-api-access-zn766\") pod \"console-f9d7485db-5nt4n\" (UID: \"1de0ebfd-b283-4790-badb-fb78d80e6703\") " pod="openshift-console/console-f9d7485db-5nt4n" Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.243095 5048 request.go:700] Waited for 1.805217185s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/serviceaccounts/oauth-openshift/token Dec 13 06:31:49 crc kubenswrapper[5048]: W1213 06:31:49.263268 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6567f364_4ab2_489b_837f_1e7c194f311d.slice/crio-04ec043e47c1dbba24ca669cd1953332691573bfade510dcbfe974df9f38e22e WatchSource:0}: Error finding container 04ec043e47c1dbba24ca669cd1953332691573bfade510dcbfe974df9f38e22e: Status 404 returned error can't find the container with id 04ec043e47c1dbba24ca669cd1953332691573bfade510dcbfe974df9f38e22e Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.270350 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pw47j\" (UniqueName: \"kubernetes.io/projected/aa6cce4c-6bc2-469b-9062-e928744616db-kube-api-access-pw47j\") pod \"oauth-openshift-558db77b4-dwk4m\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.292808 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-75h2c" Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.296775 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-psv92\" (UniqueName: \"kubernetes.io/projected/81dcc1e9-b7ab-4d9e-8a4a-6bdc087877a4-kube-api-access-psv92\") pod \"machine-api-operator-5694c8668f-x5sc8\" (UID: \"81dcc1e9-b7ab-4d9e-8a4a-6bdc087877a4\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x5sc8" Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.303103 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sz5vp\" (UniqueName: \"kubernetes.io/projected/058c0262-48b0-4176-9f52-20de42c46477-kube-api-access-sz5vp\") pod \"apiserver-76f77b778f-hxskn\" (UID: \"058c0262-48b0-4176-9f52-20de42c46477\") " pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.322279 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b9c10fa1-af81-4b42-9c40-55c4a7aa6703-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-g9bxc\" (UID: \"b9c10fa1-af81-4b42-9c40-55c4a7aa6703\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-g9bxc" Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.338877 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-x5sc8" Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.347412 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zmmkw\" (UniqueName: \"kubernetes.io/projected/8b20de69-950c-42d1-b967-d0fc59d035cd-kube-api-access-zmmkw\") pod \"dns-operator-744455d44c-x4k67\" (UID: \"8b20de69-950c-42d1-b967-d0fc59d035cd\") " pod="openshift-dns-operator/dns-operator-744455d44c-x4k67" Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.365663 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.371405 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-89rwn\" (UniqueName: \"kubernetes.io/projected/f567d62b-7941-466c-84c8-06f6854000ba-kube-api-access-89rwn\") pod \"downloads-7954f5f757-xclb7\" (UID: \"f567d62b-7941-466c-84c8-06f6854000ba\") " pod="openshift-console/downloads-7954f5f757-xclb7" Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.372921 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-5nt4n" Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.385964 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.405419 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.413802 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-vtxwp" Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.415308 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz"] Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.444846 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.465300 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.477830 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgc8n"] Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.506424 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.526702 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.544474 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.565060 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.565564 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-75h2c"] Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.764903 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-svwnq" event={"ID":"b5ed0932-7c88-4245-9403-8a0e72659f59","Type":"ContainerStarted","Data":"8c9b5d47a98bad4c91dcf65adbd999cf4bd7be2ddbdf5ecf4797999a21e8f75b"} Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.765677 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7ch79" event={"ID":"bfeb748d-a196-4696-a599-70a6386cb89b","Type":"ContainerStarted","Data":"67ad5a2ec425122dee5180b44229a452f7240be8dae7576d7757400bc62287ee"} Dec 13 06:31:49 crc kubenswrapper[5048]: I1213 06:31:49.767870 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-qcdrc" event={"ID":"6567f364-4ab2-489b-837f-1e7c194f311d","Type":"ContainerStarted","Data":"04ec043e47c1dbba24ca669cd1953332691573bfade510dcbfe974df9f38e22e"} Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.350261 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-7pgnt" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.350704 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.350927 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-xclb7" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.350995 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-g9bxc" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.351195 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.351502 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-x4k67" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.352972 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.353064 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/36ee331b-baa0-42ac-9bd3-7c52253814e1-ca-trust-extracted\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.353093 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/36ee331b-baa0-42ac-9bd3-7c52253814e1-registry-tls\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:50 crc kubenswrapper[5048]: E1213 06:31:50.353985 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:31:50.853965629 +0000 UTC m=+144.720560210 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:31:50 crc kubenswrapper[5048]: W1213 06:31:50.369826 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4fe5a2bb_7e2b_4cf4_bb1b_a0be5c9027e3.slice/crio-04785b6a8e9cd06351d05f93703dddcdbdfa809ffaba1d93079d5ec32065157a WatchSource:0}: Error finding container 04785b6a8e9cd06351d05f93703dddcdbdfa809ffaba1d93079d5ec32065157a: Status 404 returned error can't find the container with id 04785b6a8e9cd06351d05f93703dddcdbdfa809ffaba1d93079d5ec32065157a Dec 13 06:31:50 crc kubenswrapper[5048]: W1213 06:31:50.373594 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3f047ff9_f271_4f18_935e_f811afd75852.slice/crio-2e7ca2d19cdad66801827e506fc98ae5e10fd9ca602ba57725b4ac6bbd4b6853 WatchSource:0}: Error finding container 2e7ca2d19cdad66801827e506fc98ae5e10fd9ca602ba57725b4ac6bbd4b6853: Status 404 returned error can't find the container with id 2e7ca2d19cdad66801827e506fc98ae5e10fd9ca602ba57725b4ac6bbd4b6853 Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.455184 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:31:50 crc kubenswrapper[5048]: E1213 06:31:50.455666 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:31:50.955633145 +0000 UTC m=+144.822227726 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.456850 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jt626\" (UniqueName: \"kubernetes.io/projected/4b48e4b5-ade1-4838-8866-692179d8418a-kube-api-access-jt626\") pod \"openshift-controller-manager-operator-756b6f6bc6-t2n95\" (UID: \"4b48e4b5-ade1-4838-8866-692179d8418a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-t2n95" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.457045 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2754f9a4-6375-4864-9d69-7674e3dfe490-serving-cert\") pod \"route-controller-manager-6576b87f9c-lzrgn\" (UID: \"2754f9a4-6375-4864-9d69-7674e3dfe490\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lzrgn" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.457312 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2754f9a4-6375-4864-9d69-7674e3dfe490-client-ca\") pod \"route-controller-manager-6576b87f9c-lzrgn\" (UID: \"2754f9a4-6375-4864-9d69-7674e3dfe490\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lzrgn" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.457530 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vmrsg\" (UniqueName: \"kubernetes.io/projected/493b8318-26e3-4f4e-b5d1-e1b2fd57de35-kube-api-access-vmrsg\") pod \"cluster-samples-operator-665b6dd947-5qgj6\" (UID: \"493b8318-26e3-4f4e-b5d1-e1b2fd57de35\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5qgj6" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.458857 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jfwjt\" (UniqueName: \"kubernetes.io/projected/c5a72af5-cc5a-4d16-acc9-2126107c6b6c-kube-api-access-jfwjt\") pod \"machine-config-operator-74547568cd-m4qpv\" (UID: \"c5a72af5-cc5a-4d16-acc9-2126107c6b6c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-m4qpv" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.459745 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6365e1b5-5ddc-4f83-8388-6ffc37bf18e2-serving-cert\") pod \"authentication-operator-69f744f599-7w8rw\" (UID: \"6365e1b5-5ddc-4f83-8388-6ffc37bf18e2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-7w8rw" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.460311 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod 
\"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.460351 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/36ee331b-baa0-42ac-9bd3-7c52253814e1-registry-certificates\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.460380 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kq676\" (UniqueName: \"kubernetes.io/projected/d0451ecc-c881-4b85-a6af-63796033ef83-kube-api-access-kq676\") pod \"etcd-operator-b45778765-w5fph\" (UID: \"d0451ecc-c881-4b85-a6af-63796033ef83\") " pod="openshift-etcd-operator/etcd-operator-b45778765-w5fph" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.460405 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/493b8318-26e3-4f4e-b5d1-e1b2fd57de35-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-5qgj6\" (UID: \"493b8318-26e3-4f4e-b5d1-e1b2fd57de35\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5qgj6" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.460483 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c5a72af5-cc5a-4d16-acc9-2126107c6b6c-auth-proxy-config\") pod \"machine-config-operator-74547568cd-m4qpv\" (UID: \"c5a72af5-cc5a-4d16-acc9-2126107c6b6c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-m4qpv" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.460516 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6365e1b5-5ddc-4f83-8388-6ffc37bf18e2-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-7w8rw\" (UID: \"6365e1b5-5ddc-4f83-8388-6ffc37bf18e2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-7w8rw" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.460538 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/c5a72af5-cc5a-4d16-acc9-2126107c6b6c-proxy-tls\") pod \"machine-config-operator-74547568cd-m4qpv\" (UID: \"c5a72af5-cc5a-4d16-acc9-2126107c6b6c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-m4qpv" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.460595 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2754f9a4-6375-4864-9d69-7674e3dfe490-config\") pod \"route-controller-manager-6576b87f9c-lzrgn\" (UID: \"2754f9a4-6375-4864-9d69-7674e3dfe490\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lzrgn" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.460616 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: 
\"kubernetes.io/configmap/c5a72af5-cc5a-4d16-acc9-2126107c6b6c-images\") pod \"machine-config-operator-74547568cd-m4qpv\" (UID: \"c5a72af5-cc5a-4d16-acc9-2126107c6b6c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-m4qpv" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.460654 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6365e1b5-5ddc-4f83-8388-6ffc37bf18e2-config\") pod \"authentication-operator-69f744f599-7w8rw\" (UID: \"6365e1b5-5ddc-4f83-8388-6ffc37bf18e2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-7w8rw" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.460692 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b48e4b5-ade1-4838-8866-692179d8418a-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-t2n95\" (UID: \"4b48e4b5-ade1-4838-8866-692179d8418a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-t2n95" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.460713 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/d0451ecc-c881-4b85-a6af-63796033ef83-etcd-service-ca\") pod \"etcd-operator-b45778765-w5fph\" (UID: \"d0451ecc-c881-4b85-a6af-63796033ef83\") " pod="openshift-etcd-operator/etcd-operator-b45778765-w5fph" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.460735 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d0451ecc-c881-4b85-a6af-63796033ef83-serving-cert\") pod \"etcd-operator-b45778765-w5fph\" (UID: \"d0451ecc-c881-4b85-a6af-63796033ef83\") " pod="openshift-etcd-operator/etcd-operator-b45778765-w5fph" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.460778 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kbc8x\" (UniqueName: \"kubernetes.io/projected/36ee331b-baa0-42ac-9bd3-7c52253814e1-kube-api-access-kbc8x\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.460805 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mwqbj\" (UniqueName: \"kubernetes.io/projected/6365e1b5-5ddc-4f83-8388-6ffc37bf18e2-kube-api-access-mwqbj\") pod \"authentication-operator-69f744f599-7w8rw\" (UID: \"6365e1b5-5ddc-4f83-8388-6ffc37bf18e2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-7w8rw" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.460830 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/d0451ecc-c881-4b85-a6af-63796033ef83-etcd-client\") pod \"etcd-operator-b45778765-w5fph\" (UID: \"d0451ecc-c881-4b85-a6af-63796033ef83\") " pod="openshift-etcd-operator/etcd-operator-b45778765-w5fph" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.460855 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: 
\"kubernetes.io/projected/36ee331b-baa0-42ac-9bd3-7c52253814e1-bound-sa-token\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.460903 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0451ecc-c881-4b85-a6af-63796033ef83-config\") pod \"etcd-operator-b45778765-w5fph\" (UID: \"d0451ecc-c881-4b85-a6af-63796033ef83\") " pod="openshift-etcd-operator/etcd-operator-b45778765-w5fph" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.460929 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e941413-0d6f-4fe7-bfc1-218b8bc6d026-config\") pod \"kube-controller-manager-operator-78b949d7b-8gchm\" (UID: \"2e941413-0d6f-4fe7-bfc1-218b8bc6d026\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8gchm" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.461027 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p6skd\" (UniqueName: \"kubernetes.io/projected/2754f9a4-6375-4864-9d69-7674e3dfe490-kube-api-access-p6skd\") pod \"route-controller-manager-6576b87f9c-lzrgn\" (UID: \"2754f9a4-6375-4864-9d69-7674e3dfe490\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lzrgn" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.461052 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/36ee331b-baa0-42ac-9bd3-7c52253814e1-trusted-ca\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.461078 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2e941413-0d6f-4fe7-bfc1-218b8bc6d026-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-8gchm\" (UID: \"2e941413-0d6f-4fe7-bfc1-218b8bc6d026\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8gchm" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.461098 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/2423ea84-11dc-47d0-8d1d-1ce24260f02e-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-895rs\" (UID: \"2423ea84-11dc-47d0-8d1d-1ce24260f02e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-895rs" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.461116 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dp45t\" (UniqueName: \"kubernetes.io/projected/2423ea84-11dc-47d0-8d1d-1ce24260f02e-kube-api-access-dp45t\") pod \"multus-admission-controller-857f4d67dd-895rs\" (UID: \"2423ea84-11dc-47d0-8d1d-1ce24260f02e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-895rs" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.461184 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/d0451ecc-c881-4b85-a6af-63796033ef83-etcd-ca\") pod \"etcd-operator-b45778765-w5fph\" (UID: \"d0451ecc-c881-4b85-a6af-63796033ef83\") " pod="openshift-etcd-operator/etcd-operator-b45778765-w5fph" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.461204 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4b48e4b5-ade1-4838-8866-692179d8418a-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-t2n95\" (UID: \"4b48e4b5-ade1-4838-8866-692179d8418a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-t2n95" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.461237 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/36ee331b-baa0-42ac-9bd3-7c52253814e1-ca-trust-extracted\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.461278 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2e941413-0d6f-4fe7-bfc1-218b8bc6d026-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-8gchm\" (UID: \"2e941413-0d6f-4fe7-bfc1-218b8bc6d026\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8gchm" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.461314 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6365e1b5-5ddc-4f83-8388-6ffc37bf18e2-service-ca-bundle\") pod \"authentication-operator-69f744f599-7w8rw\" (UID: \"6365e1b5-5ddc-4f83-8388-6ffc37bf18e2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-7w8rw" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.461343 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/36ee331b-baa0-42ac-9bd3-7c52253814e1-registry-tls\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.461364 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/36ee331b-baa0-42ac-9bd3-7c52253814e1-installation-pull-secrets\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.462667 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/36ee331b-baa0-42ac-9bd3-7c52253814e1-ca-trust-extracted\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:50 crc kubenswrapper[5048]: E1213 06:31:50.462680 5048 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:31:50.962649042 +0000 UTC m=+144.829243623 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.487283 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/36ee331b-baa0-42ac-9bd3-7c52253814e1-registry-tls\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.562489 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.562831 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qg9l8\" (UniqueName: \"kubernetes.io/projected/9d2b0c42-857f-4f52-8742-02c7ee9a9fd3-kube-api-access-qg9l8\") pod \"migrator-59844c95c7-xkmc7\" (UID: \"9d2b0c42-857f-4f52-8742-02c7ee9a9fd3\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-xkmc7" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.562888 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/493b8318-26e3-4f4e-b5d1-e1b2fd57de35-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-5qgj6\" (UID: \"493b8318-26e3-4f4e-b5d1-e1b2fd57de35\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5qgj6" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.562909 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/c5a72af5-cc5a-4d16-acc9-2126107c6b6c-proxy-tls\") pod \"machine-config-operator-74547568cd-m4qpv\" (UID: \"c5a72af5-cc5a-4d16-acc9-2126107c6b6c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-m4qpv" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.562928 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/c5a72af5-cc5a-4d16-acc9-2126107c6b6c-images\") pod \"machine-config-operator-74547568cd-m4qpv\" (UID: \"c5a72af5-cc5a-4d16-acc9-2126107c6b6c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-m4qpv" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.562951 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6a6efd27-be04-47b3-8f2c-fa84e687e4f2-bound-sa-token\") pod \"ingress-operator-5b745b69d9-zrtpr\" 
(UID: \"6a6efd27-be04-47b3-8f2c-fa84e687e4f2\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zrtpr" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.562985 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/603fdfea-79ed-48f4-89b0-2231fe8fed87-metrics-tls\") pod \"dns-default-6p4wm\" (UID: \"603fdfea-79ed-48f4-89b0-2231fe8fed87\") " pod="openshift-dns/dns-default-6p4wm" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.563009 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d0451ecc-c881-4b85-a6af-63796033ef83-serving-cert\") pod \"etcd-operator-b45778765-w5fph\" (UID: \"d0451ecc-c881-4b85-a6af-63796033ef83\") " pod="openshift-etcd-operator/etcd-operator-b45778765-w5fph" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.563032 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vrh4r\" (UniqueName: \"kubernetes.io/projected/5dc88be1-84b6-4574-9b80-de53c49a7b1e-kube-api-access-vrh4r\") pod \"machine-config-server-gzp2z\" (UID: \"5dc88be1-84b6-4574-9b80-de53c49a7b1e\") " pod="openshift-machine-config-operator/machine-config-server-gzp2z" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.563056 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27a55ec7-3458-4125-aacf-9c6a91b5145b-config\") pod \"console-operator-58897d9998-cxfwv\" (UID: \"27a55ec7-3458-4125-aacf-9c6a91b5145b\") " pod="openshift-console-operator/console-operator-58897d9998-cxfwv" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.563076 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kbc8x\" (UniqueName: \"kubernetes.io/projected/36ee331b-baa0-42ac-9bd3-7c52253814e1-kube-api-access-kbc8x\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.563094 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mwqbj\" (UniqueName: \"kubernetes.io/projected/6365e1b5-5ddc-4f83-8388-6ffc37bf18e2-kube-api-access-mwqbj\") pod \"authentication-operator-69f744f599-7w8rw\" (UID: \"6365e1b5-5ddc-4f83-8388-6ffc37bf18e2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-7w8rw" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.563112 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/d0451ecc-c881-4b85-a6af-63796033ef83-etcd-client\") pod \"etcd-operator-b45778765-w5fph\" (UID: \"d0451ecc-c881-4b85-a6af-63796033ef83\") " pod="openshift-etcd-operator/etcd-operator-b45778765-w5fph" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.563132 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v9njl\" (UniqueName: \"kubernetes.io/projected/4359b12c-c2c7-484a-978d-388afe815a40-kube-api-access-v9njl\") pod \"kube-storage-version-migrator-operator-b67b599dd-rd7ph\" (UID: \"4359b12c-c2c7-484a-978d-388afe815a40\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rd7ph" Dec 13 
06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.563204 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/36ee331b-baa0-42ac-9bd3-7c52253814e1-trusted-ca\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.563227 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/1e226353-ea51-4aa1-921b-63a11a769cc7-registration-dir\") pod \"csi-hostpathplugin-k7pxh\" (UID: \"1e226353-ea51-4aa1-921b-63a11a769cc7\") " pod="hostpath-provisioner/csi-hostpathplugin-k7pxh" Dec 13 06:31:50 crc kubenswrapper[5048]: E1213 06:31:50.563273 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:31:51.063226799 +0000 UTC m=+144.929821530 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.563344 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4359b12c-c2c7-484a-978d-388afe815a40-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-rd7ph\" (UID: \"4359b12c-c2c7-484a-978d-388afe815a40\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rd7ph" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.563418 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2e941413-0d6f-4fe7-bfc1-218b8bc6d026-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-8gchm\" (UID: \"2e941413-0d6f-4fe7-bfc1-218b8bc6d026\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8gchm" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.563478 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/2423ea84-11dc-47d0-8d1d-1ce24260f02e-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-895rs\" (UID: \"2423ea84-11dc-47d0-8d1d-1ce24260f02e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-895rs" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.563525 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dkhlf\" (UniqueName: \"kubernetes.io/projected/7c4ae147-0445-404f-9c19-bd55e71ceae8-kube-api-access-dkhlf\") pod \"packageserver-d55dfcdfc-9tcn7\" (UID: \"7c4ae147-0445-404f-9c19-bd55e71ceae8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9tcn7" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.563581 5048 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4b48e4b5-ade1-4838-8866-692179d8418a-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-t2n95\" (UID: \"4b48e4b5-ade1-4838-8866-692179d8418a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-t2n95" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.563630 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/54b2c6a4-d68f-4e1d-a686-626abdb6e127-metrics-certs\") pod \"router-default-5444994796-rtnpd\" (UID: \"54b2c6a4-d68f-4e1d-a686-626abdb6e127\") " pod="openshift-ingress/router-default-5444994796-rtnpd" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.563677 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6pjjk\" (UniqueName: \"kubernetes.io/projected/603fdfea-79ed-48f4-89b0-2231fe8fed87-kube-api-access-6pjjk\") pod \"dns-default-6p4wm\" (UID: \"603fdfea-79ed-48f4-89b0-2231fe8fed87\") " pod="openshift-dns/dns-default-6p4wm" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.563704 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/d37b6448-0b50-4bf9-b345-0feec5f5babb-signing-key\") pod \"service-ca-9c57cc56f-lwkxb\" (UID: \"d37b6448-0b50-4bf9-b345-0feec5f5babb\") " pod="openshift-service-ca/service-ca-9c57cc56f-lwkxb" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.563792 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4359b12c-c2c7-484a-978d-388afe815a40-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-rd7ph\" (UID: \"4359b12c-c2c7-484a-978d-388afe815a40\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rd7ph" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.568997 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/c5a72af5-cc5a-4d16-acc9-2126107c6b6c-images\") pod \"machine-config-operator-74547568cd-m4qpv\" (UID: \"c5a72af5-cc5a-4d16-acc9-2126107c6b6c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-m4qpv" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.572263 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5dc88be1-84b6-4574-9b80-de53c49a7b1e-node-bootstrap-token\") pod \"machine-config-server-gzp2z\" (UID: \"5dc88be1-84b6-4574-9b80-de53c49a7b1e\") " pod="openshift-machine-config-operator/machine-config-server-gzp2z" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.572380 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/cdcbb504-e2d6-4511-bf24-d18ba641f45b-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-l5kdh\" (UID: \"cdcbb504-e2d6-4511-bf24-d18ba641f45b\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l5kdh" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.572427 5048 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/6a24e010-6ffa-45f6-9b92-99896a2c287f-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-28jpz\" (UID: \"6a24e010-6ffa-45f6-9b92-99896a2c287f\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-28jpz" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.572599 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/05dc53d2-6b17-42e1-b211-43716c5f3037-config\") pod \"service-ca-operator-777779d784-4vp8z\" (UID: \"05dc53d2-6b17-42e1-b211-43716c5f3037\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4vp8z" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.572628 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2pws8\" (UniqueName: \"kubernetes.io/projected/27a55ec7-3458-4125-aacf-9c6a91b5145b-kube-api-access-2pws8\") pod \"console-operator-58897d9998-cxfwv\" (UID: \"27a55ec7-3458-4125-aacf-9c6a91b5145b\") " pod="openshift-console-operator/console-operator-58897d9998-cxfwv" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.572981 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/7c4ae147-0445-404f-9c19-bd55e71ceae8-tmpfs\") pod \"packageserver-d55dfcdfc-9tcn7\" (UID: \"7c4ae147-0445-404f-9c19-bd55e71ceae8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9tcn7" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.573066 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/05dc53d2-6b17-42e1-b211-43716c5f3037-serving-cert\") pod \"service-ca-operator-777779d784-4vp8z\" (UID: \"05dc53d2-6b17-42e1-b211-43716c5f3037\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4vp8z" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.574333 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5v4qk\" (UniqueName: \"kubernetes.io/projected/cdcbb504-e2d6-4511-bf24-d18ba641f45b-kube-api-access-5v4qk\") pod \"control-plane-machine-set-operator-78cbb6b69f-l5kdh\" (UID: \"cdcbb504-e2d6-4511-bf24-d18ba641f45b\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l5kdh" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.574373 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/54b2c6a4-d68f-4e1d-a686-626abdb6e127-stats-auth\") pod \"router-default-5444994796-rtnpd\" (UID: \"54b2c6a4-d68f-4e1d-a686-626abdb6e127\") " pod="openshift-ingress/router-default-5444994796-rtnpd" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.574392 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6a6efd27-be04-47b3-8f2c-fa84e687e4f2-metrics-tls\") pod \"ingress-operator-5b745b69d9-zrtpr\" (UID: \"6a6efd27-be04-47b3-8f2c-fa84e687e4f2\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zrtpr" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.574461 5048 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vmrsg\" (UniqueName: \"kubernetes.io/projected/493b8318-26e3-4f4e-b5d1-e1b2fd57de35-kube-api-access-vmrsg\") pod \"cluster-samples-operator-665b6dd947-5qgj6\" (UID: \"493b8318-26e3-4f4e-b5d1-e1b2fd57de35\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5qgj6" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.574482 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5dc88be1-84b6-4574-9b80-de53c49a7b1e-certs\") pod \"machine-config-server-gzp2z\" (UID: \"5dc88be1-84b6-4574-9b80-de53c49a7b1e\") " pod="openshift-machine-config-operator/machine-config-server-gzp2z" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.574533 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/1e226353-ea51-4aa1-921b-63a11a769cc7-mountpoint-dir\") pod \"csi-hostpathplugin-k7pxh\" (UID: \"1e226353-ea51-4aa1-921b-63a11a769cc7\") " pod="hostpath-provisioner/csi-hostpathplugin-k7pxh" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.574576 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a458fdb3-0662-44a0-8df6-b81dcb66a669-config-volume\") pod \"collect-profiles-29426790-559jq\" (UID: \"a458fdb3-0662-44a0-8df6-b81dcb66a669\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426790-559jq" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.574606 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6365e1b5-5ddc-4f83-8388-6ffc37bf18e2-serving-cert\") pod \"authentication-operator-69f744f599-7w8rw\" (UID: \"6365e1b5-5ddc-4f83-8388-6ffc37bf18e2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-7w8rw" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.574665 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4wh6v\" (UniqueName: \"kubernetes.io/projected/54b2c6a4-d68f-4e1d-a686-626abdb6e127-kube-api-access-4wh6v\") pod \"router-default-5444994796-rtnpd\" (UID: \"54b2c6a4-d68f-4e1d-a686-626abdb6e127\") " pod="openshift-ingress/router-default-5444994796-rtnpd" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.574686 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9mfsl\" (UniqueName: \"kubernetes.io/projected/6111e5a8-1616-417d-a3d6-d7b1a39ec709-kube-api-access-9mfsl\") pod \"marketplace-operator-79b997595-cwvrg\" (UID: \"6111e5a8-1616-417d-a3d6-d7b1a39ec709\") " pod="openshift-marketplace/marketplace-operator-79b997595-cwvrg" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.574704 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6a6efd27-be04-47b3-8f2c-fa84e687e4f2-trusted-ca\") pod \"ingress-operator-5b745b69d9-zrtpr\" (UID: \"6a6efd27-be04-47b3-8f2c-fa84e687e4f2\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zrtpr" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.574726 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/36ee331b-baa0-42ac-9bd3-7c52253814e1-registry-certificates\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.574747 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kq676\" (UniqueName: \"kubernetes.io/projected/d0451ecc-c881-4b85-a6af-63796033ef83-kube-api-access-kq676\") pod \"etcd-operator-b45778765-w5fph\" (UID: \"d0451ecc-c881-4b85-a6af-63796033ef83\") " pod="openshift-etcd-operator/etcd-operator-b45778765-w5fph" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.574802 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c5a72af5-cc5a-4d16-acc9-2126107c6b6c-auth-proxy-config\") pod \"machine-config-operator-74547568cd-m4qpv\" (UID: \"c5a72af5-cc5a-4d16-acc9-2126107c6b6c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-m4qpv" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.574823 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6365e1b5-5ddc-4f83-8388-6ffc37bf18e2-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-7w8rw\" (UID: \"6365e1b5-5ddc-4f83-8388-6ffc37bf18e2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-7w8rw" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.574840 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/1e226353-ea51-4aa1-921b-63a11a769cc7-socket-dir\") pod \"csi-hostpathplugin-k7pxh\" (UID: \"1e226353-ea51-4aa1-921b-63a11a769cc7\") " pod="hostpath-provisioner/csi-hostpathplugin-k7pxh" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.574870 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6365e1b5-5ddc-4f83-8388-6ffc37bf18e2-config\") pod \"authentication-operator-69f744f599-7w8rw\" (UID: \"6365e1b5-5ddc-4f83-8388-6ffc37bf18e2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-7w8rw" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.574889 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2754f9a4-6375-4864-9d69-7674e3dfe490-config\") pod \"route-controller-manager-6576b87f9c-lzrgn\" (UID: \"2754f9a4-6375-4864-9d69-7674e3dfe490\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lzrgn" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.574907 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/603fdfea-79ed-48f4-89b0-2231fe8fed87-config-volume\") pod \"dns-default-6p4wm\" (UID: \"603fdfea-79ed-48f4-89b0-2231fe8fed87\") " pod="openshift-dns/dns-default-6p4wm" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.574935 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b48e4b5-ade1-4838-8866-692179d8418a-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-t2n95\" (UID: 
\"4b48e4b5-ade1-4838-8866-692179d8418a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-t2n95" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.582943 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j4rvw\" (UniqueName: \"kubernetes.io/projected/e2896f64-29a2-4d9e-9836-e0abe2a8162f-kube-api-access-j4rvw\") pod \"olm-operator-6b444d44fb-9qv2z\" (UID: \"e2896f64-29a2-4d9e-9836-e0abe2a8162f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9qv2z" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.583003 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/d0451ecc-c881-4b85-a6af-63796033ef83-etcd-service-ca\") pod \"etcd-operator-b45778765-w5fph\" (UID: \"d0451ecc-c881-4b85-a6af-63796033ef83\") " pod="openshift-etcd-operator/etcd-operator-b45778765-w5fph" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.583027 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dncxg\" (UniqueName: \"kubernetes.io/projected/6a6efd27-be04-47b3-8f2c-fa84e687e4f2-kube-api-access-dncxg\") pod \"ingress-operator-5b745b69d9-zrtpr\" (UID: \"6a6efd27-be04-47b3-8f2c-fa84e687e4f2\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zrtpr" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.583049 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/d37b6448-0b50-4bf9-b345-0feec5f5babb-signing-cabundle\") pod \"service-ca-9c57cc56f-lwkxb\" (UID: \"d37b6448-0b50-4bf9-b345-0feec5f5babb\") " pod="openshift-service-ca/service-ca-9c57cc56f-lwkxb" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.583092 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/36ee331b-baa0-42ac-9bd3-7c52253814e1-bound-sa-token\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.583112 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/1e226353-ea51-4aa1-921b-63a11a769cc7-plugins-dir\") pod \"csi-hostpathplugin-k7pxh\" (UID: \"1e226353-ea51-4aa1-921b-63a11a769cc7\") " pod="hostpath-provisioner/csi-hostpathplugin-k7pxh" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.583133 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e941413-0d6f-4fe7-bfc1-218b8bc6d026-config\") pod \"kube-controller-manager-operator-78b949d7b-8gchm\" (UID: \"2e941413-0d6f-4fe7-bfc1-218b8bc6d026\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8gchm" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.583153 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6nxxq\" (UniqueName: \"kubernetes.io/projected/05dc53d2-6b17-42e1-b211-43716c5f3037-kube-api-access-6nxxq\") pod \"service-ca-operator-777779d784-4vp8z\" (UID: \"05dc53d2-6b17-42e1-b211-43716c5f3037\") " 
pod="openshift-service-ca-operator/service-ca-operator-777779d784-4vp8z" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.583197 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0451ecc-c881-4b85-a6af-63796033ef83-config\") pod \"etcd-operator-b45778765-w5fph\" (UID: \"d0451ecc-c881-4b85-a6af-63796033ef83\") " pod="openshift-etcd-operator/etcd-operator-b45778765-w5fph" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.583457 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p6skd\" (UniqueName: \"kubernetes.io/projected/2754f9a4-6375-4864-9d69-7674e3dfe490-kube-api-access-p6skd\") pod \"route-controller-manager-6576b87f9c-lzrgn\" (UID: \"2754f9a4-6375-4864-9d69-7674e3dfe490\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lzrgn" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.583531 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/1e226353-ea51-4aa1-921b-63a11a769cc7-csi-data-dir\") pod \"csi-hostpathplugin-k7pxh\" (UID: \"1e226353-ea51-4aa1-921b-63a11a769cc7\") " pod="hostpath-provisioner/csi-hostpathplugin-k7pxh" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.584338 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dp45t\" (UniqueName: \"kubernetes.io/projected/2423ea84-11dc-47d0-8d1d-1ce24260f02e-kube-api-access-dp45t\") pod \"multus-admission-controller-857f4d67dd-895rs\" (UID: \"2423ea84-11dc-47d0-8d1d-1ce24260f02e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-895rs" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.584378 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qgpcl\" (UniqueName: \"kubernetes.io/projected/3a66da44-fbdf-4676-8250-0b146b67a5e9-kube-api-access-qgpcl\") pod \"catalog-operator-68c6474976-r8m8l\" (UID: \"3a66da44-fbdf-4676-8250-0b146b67a5e9\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r8m8l" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.584396 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gzz8n\" (UniqueName: \"kubernetes.io/projected/d37b6448-0b50-4bf9-b345-0feec5f5babb-kube-api-access-gzz8n\") pod \"service-ca-9c57cc56f-lwkxb\" (UID: \"d37b6448-0b50-4bf9-b345-0feec5f5babb\") " pod="openshift-service-ca/service-ca-9c57cc56f-lwkxb" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.584422 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/e2896f64-29a2-4d9e-9836-e0abe2a8162f-srv-cert\") pod \"olm-operator-6b444d44fb-9qv2z\" (UID: \"e2896f64-29a2-4d9e-9836-e0abe2a8162f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9qv2z" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.584455 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/d0451ecc-c881-4b85-a6af-63796033ef83-etcd-ca\") pod \"etcd-operator-b45778765-w5fph\" (UID: \"d0451ecc-c881-4b85-a6af-63796033ef83\") " pod="openshift-etcd-operator/etcd-operator-b45778765-w5fph" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.584482 5048 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a458fdb3-0662-44a0-8df6-b81dcb66a669-secret-volume\") pod \"collect-profiles-29426790-559jq\" (UID: \"a458fdb3-0662-44a0-8df6-b81dcb66a669\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426790-559jq" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.584512 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2e941413-0d6f-4fe7-bfc1-218b8bc6d026-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-8gchm\" (UID: \"2e941413-0d6f-4fe7-bfc1-218b8bc6d026\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8gchm" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.584530 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x7mqm\" (UniqueName: \"kubernetes.io/projected/6a24e010-6ffa-45f6-9b92-99896a2c287f-kube-api-access-x7mqm\") pod \"package-server-manager-789f6589d5-28jpz\" (UID: \"6a24e010-6ffa-45f6-9b92-99896a2c287f\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-28jpz" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.584551 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d5v67\" (UniqueName: \"kubernetes.io/projected/a458fdb3-0662-44a0-8df6-b81dcb66a669-kube-api-access-d5v67\") pod \"collect-profiles-29426790-559jq\" (UID: \"a458fdb3-0662-44a0-8df6-b81dcb66a669\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426790-559jq" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.584586 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/e2896f64-29a2-4d9e-9836-e0abe2a8162f-profile-collector-cert\") pod \"olm-operator-6b444d44fb-9qv2z\" (UID: \"e2896f64-29a2-4d9e-9836-e0abe2a8162f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9qv2z" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.584608 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6111e5a8-1616-417d-a3d6-d7b1a39ec709-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-cwvrg\" (UID: \"6111e5a8-1616-417d-a3d6-d7b1a39ec709\") " pod="openshift-marketplace/marketplace-operator-79b997595-cwvrg" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.584744 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0451ecc-c881-4b85-a6af-63796033ef83-config\") pod \"etcd-operator-b45778765-w5fph\" (UID: \"d0451ecc-c881-4b85-a6af-63796033ef83\") " pod="openshift-etcd-operator/etcd-operator-b45778765-w5fph" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.584778 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6365e1b5-5ddc-4f83-8388-6ffc37bf18e2-service-ca-bundle\") pod \"authentication-operator-69f744f599-7w8rw\" (UID: \"6365e1b5-5ddc-4f83-8388-6ffc37bf18e2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-7w8rw" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.584798 5048 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c4d0beae-b187-40c3-bb35-aac05eec25b0-cert\") pod \"ingress-canary-6zbpx\" (UID: \"c4d0beae-b187-40c3-bb35-aac05eec25b0\") " pod="openshift-ingress-canary/ingress-canary-6zbpx" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.584832 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/36ee331b-baa0-42ac-9bd3-7c52253814e1-installation-pull-secrets\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.584849 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/3a66da44-fbdf-4676-8250-0b146b67a5e9-profile-collector-cert\") pod \"catalog-operator-68c6474976-r8m8l\" (UID: \"3a66da44-fbdf-4676-8250-0b146b67a5e9\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r8m8l" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.584986 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jt626\" (UniqueName: \"kubernetes.io/projected/4b48e4b5-ade1-4838-8866-692179d8418a-kube-api-access-jt626\") pod \"openshift-controller-manager-operator-756b6f6bc6-t2n95\" (UID: \"4b48e4b5-ade1-4838-8866-692179d8418a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-t2n95" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.585014 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7c4ae147-0445-404f-9c19-bd55e71ceae8-webhook-cert\") pod \"packageserver-d55dfcdfc-9tcn7\" (UID: \"7c4ae147-0445-404f-9c19-bd55e71ceae8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9tcn7" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.585044 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/27a55ec7-3458-4125-aacf-9c6a91b5145b-serving-cert\") pod \"console-operator-58897d9998-cxfwv\" (UID: \"27a55ec7-3458-4125-aacf-9c6a91b5145b\") " pod="openshift-console-operator/console-operator-58897d9998-cxfwv" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.585067 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2754f9a4-6375-4864-9d69-7674e3dfe490-serving-cert\") pod \"route-controller-manager-6576b87f9c-lzrgn\" (UID: \"2754f9a4-6375-4864-9d69-7674e3dfe490\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lzrgn" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.585090 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/54b2c6a4-d68f-4e1d-a686-626abdb6e127-default-certificate\") pod \"router-default-5444994796-rtnpd\" (UID: \"54b2c6a4-d68f-4e1d-a686-626abdb6e127\") " pod="openshift-ingress/router-default-5444994796-rtnpd" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.585129 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"client-ca\" (UniqueName: \"kubernetes.io/configmap/2754f9a4-6375-4864-9d69-7674e3dfe490-client-ca\") pod \"route-controller-manager-6576b87f9c-lzrgn\" (UID: \"2754f9a4-6375-4864-9d69-7674e3dfe490\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lzrgn" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.585224 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/27a55ec7-3458-4125-aacf-9c6a91b5145b-trusted-ca\") pod \"console-operator-58897d9998-cxfwv\" (UID: \"27a55ec7-3458-4125-aacf-9c6a91b5145b\") " pod="openshift-console-operator/console-operator-58897d9998-cxfwv" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.585339 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/54b2c6a4-d68f-4e1d-a686-626abdb6e127-service-ca-bundle\") pod \"router-default-5444994796-rtnpd\" (UID: \"54b2c6a4-d68f-4e1d-a686-626abdb6e127\") " pod="openshift-ingress/router-default-5444994796-rtnpd" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.585363 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6111e5a8-1616-417d-a3d6-d7b1a39ec709-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-cwvrg\" (UID: \"6111e5a8-1616-417d-a3d6-d7b1a39ec709\") " pod="openshift-marketplace/marketplace-operator-79b997595-cwvrg" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.585381 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b48e4b5-ade1-4838-8866-692179d8418a-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-t2n95\" (UID: \"4b48e4b5-ade1-4838-8866-692179d8418a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-t2n95" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.585449 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jfwjt\" (UniqueName: \"kubernetes.io/projected/c5a72af5-cc5a-4d16-acc9-2126107c6b6c-kube-api-access-jfwjt\") pod \"machine-config-operator-74547568cd-m4qpv\" (UID: \"c5a72af5-cc5a-4d16-acc9-2126107c6b6c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-m4qpv" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.587196 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/36ee331b-baa0-42ac-9bd3-7c52253814e1-trusted-ca\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.587834 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/36ee331b-baa0-42ac-9bd3-7c52253814e1-registry-certificates\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.588256 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c5a72af5-cc5a-4d16-acc9-2126107c6b6c-auth-proxy-config\") 
pod \"machine-config-operator-74547568cd-m4qpv\" (UID: \"c5a72af5-cc5a-4d16-acc9-2126107c6b6c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-m4qpv" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.588653 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/493b8318-26e3-4f4e-b5d1-e1b2fd57de35-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-5qgj6\" (UID: \"493b8318-26e3-4f4e-b5d1-e1b2fd57de35\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5qgj6" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.589009 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6365e1b5-5ddc-4f83-8388-6ffc37bf18e2-serving-cert\") pod \"authentication-operator-69f744f599-7w8rw\" (UID: \"6365e1b5-5ddc-4f83-8388-6ffc37bf18e2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-7w8rw" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.595612 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/d0451ecc-c881-4b85-a6af-63796033ef83-etcd-ca\") pod \"etcd-operator-b45778765-w5fph\" (UID: \"d0451ecc-c881-4b85-a6af-63796033ef83\") " pod="openshift-etcd-operator/etcd-operator-b45778765-w5fph" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.602941 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2754f9a4-6375-4864-9d69-7674e3dfe490-client-ca\") pod \"route-controller-manager-6576b87f9c-lzrgn\" (UID: \"2754f9a4-6375-4864-9d69-7674e3dfe490\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lzrgn" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.603027 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pjnkt\" (UniqueName: \"kubernetes.io/projected/c4d0beae-b187-40c3-bb35-aac05eec25b0-kube-api-access-pjnkt\") pod \"ingress-canary-6zbpx\" (UID: \"c4d0beae-b187-40c3-bb35-aac05eec25b0\") " pod="openshift-ingress-canary/ingress-canary-6zbpx" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.603094 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/3a66da44-fbdf-4676-8250-0b146b67a5e9-srv-cert\") pod \"catalog-operator-68c6474976-r8m8l\" (UID: \"3a66da44-fbdf-4676-8250-0b146b67a5e9\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r8m8l" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.603117 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7c4ae147-0445-404f-9c19-bd55e71ceae8-apiservice-cert\") pod \"packageserver-d55dfcdfc-9tcn7\" (UID: \"7c4ae147-0445-404f-9c19-bd55e71ceae8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9tcn7" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.603140 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2vljm\" (UniqueName: \"kubernetes.io/projected/1e226353-ea51-4aa1-921b-63a11a769cc7-kube-api-access-2vljm\") pod \"csi-hostpathplugin-k7pxh\" (UID: \"1e226353-ea51-4aa1-921b-63a11a769cc7\") " 
pod="hostpath-provisioner/csi-hostpathplugin-k7pxh" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.604290 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4b48e4b5-ade1-4838-8866-692179d8418a-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-t2n95\" (UID: \"4b48e4b5-ade1-4838-8866-692179d8418a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-t2n95" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.605473 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/c5a72af5-cc5a-4d16-acc9-2126107c6b6c-proxy-tls\") pod \"machine-config-operator-74547568cd-m4qpv\" (UID: \"c5a72af5-cc5a-4d16-acc9-2126107c6b6c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-m4qpv" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.606097 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6365e1b5-5ddc-4f83-8388-6ffc37bf18e2-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-7w8rw\" (UID: \"6365e1b5-5ddc-4f83-8388-6ffc37bf18e2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-7w8rw" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.606232 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6365e1b5-5ddc-4f83-8388-6ffc37bf18e2-service-ca-bundle\") pod \"authentication-operator-69f744f599-7w8rw\" (UID: \"6365e1b5-5ddc-4f83-8388-6ffc37bf18e2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-7w8rw" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.607675 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6365e1b5-5ddc-4f83-8388-6ffc37bf18e2-config\") pod \"authentication-operator-69f744f599-7w8rw\" (UID: \"6365e1b5-5ddc-4f83-8388-6ffc37bf18e2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-7w8rw" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.609419 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2754f9a4-6375-4864-9d69-7674e3dfe490-config\") pod \"route-controller-manager-6576b87f9c-lzrgn\" (UID: \"2754f9a4-6375-4864-9d69-7674e3dfe490\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lzrgn" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.609941 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/d0451ecc-c881-4b85-a6af-63796033ef83-etcd-client\") pod \"etcd-operator-b45778765-w5fph\" (UID: \"d0451ecc-c881-4b85-a6af-63796033ef83\") " pod="openshift-etcd-operator/etcd-operator-b45778765-w5fph" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.612367 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e941413-0d6f-4fe7-bfc1-218b8bc6d026-config\") pod \"kube-controller-manager-operator-78b949d7b-8gchm\" (UID: \"2e941413-0d6f-4fe7-bfc1-218b8bc6d026\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8gchm" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.615810 5048 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d0451ecc-c881-4b85-a6af-63796033ef83-serving-cert\") pod \"etcd-operator-b45778765-w5fph\" (UID: \"d0451ecc-c881-4b85-a6af-63796033ef83\") " pod="openshift-etcd-operator/etcd-operator-b45778765-w5fph" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.617523 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/2423ea84-11dc-47d0-8d1d-1ce24260f02e-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-895rs\" (UID: \"2423ea84-11dc-47d0-8d1d-1ce24260f02e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-895rs" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.619051 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/d0451ecc-c881-4b85-a6af-63796033ef83-etcd-service-ca\") pod \"etcd-operator-b45778765-w5fph\" (UID: \"d0451ecc-c881-4b85-a6af-63796033ef83\") " pod="openshift-etcd-operator/etcd-operator-b45778765-w5fph" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.629191 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2754f9a4-6375-4864-9d69-7674e3dfe490-serving-cert\") pod \"route-controller-manager-6576b87f9c-lzrgn\" (UID: \"2754f9a4-6375-4864-9d69-7674e3dfe490\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lzrgn" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.640882 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2e941413-0d6f-4fe7-bfc1-218b8bc6d026-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-8gchm\" (UID: \"2e941413-0d6f-4fe7-bfc1-218b8bc6d026\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8gchm" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.641461 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2e941413-0d6f-4fe7-bfc1-218b8bc6d026-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-8gchm\" (UID: \"2e941413-0d6f-4fe7-bfc1-218b8bc6d026\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8gchm" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.643909 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mwqbj\" (UniqueName: \"kubernetes.io/projected/6365e1b5-5ddc-4f83-8388-6ffc37bf18e2-kube-api-access-mwqbj\") pod \"authentication-operator-69f744f599-7w8rw\" (UID: \"6365e1b5-5ddc-4f83-8388-6ffc37bf18e2\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-7w8rw" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.687709 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-x5sc8"] Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.691614 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kq676\" (UniqueName: \"kubernetes.io/projected/d0451ecc-c881-4b85-a6af-63796033ef83-kube-api-access-kq676\") pod \"etcd-operator-b45778765-w5fph\" (UID: \"d0451ecc-c881-4b85-a6af-63796033ef83\") " pod="openshift-etcd-operator/etcd-operator-b45778765-w5fph" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.704033 5048 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qgpcl\" (UniqueName: \"kubernetes.io/projected/3a66da44-fbdf-4676-8250-0b146b67a5e9-kube-api-access-qgpcl\") pod \"catalog-operator-68c6474976-r8m8l\" (UID: \"3a66da44-fbdf-4676-8250-0b146b67a5e9\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r8m8l" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.704075 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gzz8n\" (UniqueName: \"kubernetes.io/projected/d37b6448-0b50-4bf9-b345-0feec5f5babb-kube-api-access-gzz8n\") pod \"service-ca-9c57cc56f-lwkxb\" (UID: \"d37b6448-0b50-4bf9-b345-0feec5f5babb\") " pod="openshift-service-ca/service-ca-9c57cc56f-lwkxb" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.704104 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/e2896f64-29a2-4d9e-9836-e0abe2a8162f-srv-cert\") pod \"olm-operator-6b444d44fb-9qv2z\" (UID: \"e2896f64-29a2-4d9e-9836-e0abe2a8162f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9qv2z" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.704130 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x7mqm\" (UniqueName: \"kubernetes.io/projected/6a24e010-6ffa-45f6-9b92-99896a2c287f-kube-api-access-x7mqm\") pod \"package-server-manager-789f6589d5-28jpz\" (UID: \"6a24e010-6ffa-45f6-9b92-99896a2c287f\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-28jpz" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.704148 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a458fdb3-0662-44a0-8df6-b81dcb66a669-secret-volume\") pod \"collect-profiles-29426790-559jq\" (UID: \"a458fdb3-0662-44a0-8df6-b81dcb66a669\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426790-559jq" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.704165 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/e2896f64-29a2-4d9e-9836-e0abe2a8162f-profile-collector-cert\") pod \"olm-operator-6b444d44fb-9qv2z\" (UID: \"e2896f64-29a2-4d9e-9836-e0abe2a8162f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9qv2z" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.704185 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6111e5a8-1616-417d-a3d6-d7b1a39ec709-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-cwvrg\" (UID: \"6111e5a8-1616-417d-a3d6-d7b1a39ec709\") " pod="openshift-marketplace/marketplace-operator-79b997595-cwvrg" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.704204 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d5v67\" (UniqueName: \"kubernetes.io/projected/a458fdb3-0662-44a0-8df6-b81dcb66a669-kube-api-access-d5v67\") pod \"collect-profiles-29426790-559jq\" (UID: \"a458fdb3-0662-44a0-8df6-b81dcb66a669\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426790-559jq" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.704224 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: 
\"kubernetes.io/secret/c4d0beae-b187-40c3-bb35-aac05eec25b0-cert\") pod \"ingress-canary-6zbpx\" (UID: \"c4d0beae-b187-40c3-bb35-aac05eec25b0\") " pod="openshift-ingress-canary/ingress-canary-6zbpx" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.704257 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/3a66da44-fbdf-4676-8250-0b146b67a5e9-profile-collector-cert\") pod \"catalog-operator-68c6474976-r8m8l\" (UID: \"3a66da44-fbdf-4676-8250-0b146b67a5e9\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r8m8l" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.704291 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7c4ae147-0445-404f-9c19-bd55e71ceae8-webhook-cert\") pod \"packageserver-d55dfcdfc-9tcn7\" (UID: \"7c4ae147-0445-404f-9c19-bd55e71ceae8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9tcn7" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.704311 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/54b2c6a4-d68f-4e1d-a686-626abdb6e127-default-certificate\") pod \"router-default-5444994796-rtnpd\" (UID: \"54b2c6a4-d68f-4e1d-a686-626abdb6e127\") " pod="openshift-ingress/router-default-5444994796-rtnpd" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.704331 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/27a55ec7-3458-4125-aacf-9c6a91b5145b-serving-cert\") pod \"console-operator-58897d9998-cxfwv\" (UID: \"27a55ec7-3458-4125-aacf-9c6a91b5145b\") " pod="openshift-console-operator/console-operator-58897d9998-cxfwv" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.704351 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/27a55ec7-3458-4125-aacf-9c6a91b5145b-trusted-ca\") pod \"console-operator-58897d9998-cxfwv\" (UID: \"27a55ec7-3458-4125-aacf-9c6a91b5145b\") " pod="openshift-console-operator/console-operator-58897d9998-cxfwv" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.704372 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6111e5a8-1616-417d-a3d6-d7b1a39ec709-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-cwvrg\" (UID: \"6111e5a8-1616-417d-a3d6-d7b1a39ec709\") " pod="openshift-marketplace/marketplace-operator-79b997595-cwvrg" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.704392 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/54b2c6a4-d68f-4e1d-a686-626abdb6e127-service-ca-bundle\") pod \"router-default-5444994796-rtnpd\" (UID: \"54b2c6a4-d68f-4e1d-a686-626abdb6e127\") " pod="openshift-ingress/router-default-5444994796-rtnpd" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.704418 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pjnkt\" (UniqueName: \"kubernetes.io/projected/c4d0beae-b187-40c3-bb35-aac05eec25b0-kube-api-access-pjnkt\") pod \"ingress-canary-6zbpx\" (UID: \"c4d0beae-b187-40c3-bb35-aac05eec25b0\") " pod="openshift-ingress-canary/ingress-canary-6zbpx" Dec 13 06:31:50 crc 
kubenswrapper[5048]: I1213 06:31:50.704686 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/3a66da44-fbdf-4676-8250-0b146b67a5e9-srv-cert\") pod \"catalog-operator-68c6474976-r8m8l\" (UID: \"3a66da44-fbdf-4676-8250-0b146b67a5e9\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r8m8l" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.704713 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7c4ae147-0445-404f-9c19-bd55e71ceae8-apiservice-cert\") pod \"packageserver-d55dfcdfc-9tcn7\" (UID: \"7c4ae147-0445-404f-9c19-bd55e71ceae8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9tcn7" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.704734 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2vljm\" (UniqueName: \"kubernetes.io/projected/1e226353-ea51-4aa1-921b-63a11a769cc7-kube-api-access-2vljm\") pod \"csi-hostpathplugin-k7pxh\" (UID: \"1e226353-ea51-4aa1-921b-63a11a769cc7\") " pod="hostpath-provisioner/csi-hostpathplugin-k7pxh" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.704760 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.704782 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qg9l8\" (UniqueName: \"kubernetes.io/projected/9d2b0c42-857f-4f52-8742-02c7ee9a9fd3-kube-api-access-qg9l8\") pod \"migrator-59844c95c7-xkmc7\" (UID: \"9d2b0c42-857f-4f52-8742-02c7ee9a9fd3\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-xkmc7" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.704825 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6a6efd27-be04-47b3-8f2c-fa84e687e4f2-bound-sa-token\") pod \"ingress-operator-5b745b69d9-zrtpr\" (UID: \"6a6efd27-be04-47b3-8f2c-fa84e687e4f2\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zrtpr" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.704846 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/603fdfea-79ed-48f4-89b0-2231fe8fed87-metrics-tls\") pod \"dns-default-6p4wm\" (UID: \"603fdfea-79ed-48f4-89b0-2231fe8fed87\") " pod="openshift-dns/dns-default-6p4wm" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.704866 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vrh4r\" (UniqueName: \"kubernetes.io/projected/5dc88be1-84b6-4574-9b80-de53c49a7b1e-kube-api-access-vrh4r\") pod \"machine-config-server-gzp2z\" (UID: \"5dc88be1-84b6-4574-9b80-de53c49a7b1e\") " pod="openshift-machine-config-operator/machine-config-server-gzp2z" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.704882 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27a55ec7-3458-4125-aacf-9c6a91b5145b-config\") pod 
\"console-operator-58897d9998-cxfwv\" (UID: \"27a55ec7-3458-4125-aacf-9c6a91b5145b\") " pod="openshift-console-operator/console-operator-58897d9998-cxfwv" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.704910 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v9njl\" (UniqueName: \"kubernetes.io/projected/4359b12c-c2c7-484a-978d-388afe815a40-kube-api-access-v9njl\") pod \"kube-storage-version-migrator-operator-b67b599dd-rd7ph\" (UID: \"4359b12c-c2c7-484a-978d-388afe815a40\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rd7ph" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.704930 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/1e226353-ea51-4aa1-921b-63a11a769cc7-registration-dir\") pod \"csi-hostpathplugin-k7pxh\" (UID: \"1e226353-ea51-4aa1-921b-63a11a769cc7\") " pod="hostpath-provisioner/csi-hostpathplugin-k7pxh" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.704950 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4359b12c-c2c7-484a-978d-388afe815a40-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-rd7ph\" (UID: \"4359b12c-c2c7-484a-978d-388afe815a40\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rd7ph" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.704976 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dkhlf\" (UniqueName: \"kubernetes.io/projected/7c4ae147-0445-404f-9c19-bd55e71ceae8-kube-api-access-dkhlf\") pod \"packageserver-d55dfcdfc-9tcn7\" (UID: \"7c4ae147-0445-404f-9c19-bd55e71ceae8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9tcn7" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.705000 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/54b2c6a4-d68f-4e1d-a686-626abdb6e127-metrics-certs\") pod \"router-default-5444994796-rtnpd\" (UID: \"54b2c6a4-d68f-4e1d-a686-626abdb6e127\") " pod="openshift-ingress/router-default-5444994796-rtnpd" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.705016 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6pjjk\" (UniqueName: \"kubernetes.io/projected/603fdfea-79ed-48f4-89b0-2231fe8fed87-kube-api-access-6pjjk\") pod \"dns-default-6p4wm\" (UID: \"603fdfea-79ed-48f4-89b0-2231fe8fed87\") " pod="openshift-dns/dns-default-6p4wm" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.705034 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/d37b6448-0b50-4bf9-b345-0feec5f5babb-signing-key\") pod \"service-ca-9c57cc56f-lwkxb\" (UID: \"d37b6448-0b50-4bf9-b345-0feec5f5babb\") " pod="openshift-service-ca/service-ca-9c57cc56f-lwkxb" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.705051 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4359b12c-c2c7-484a-978d-388afe815a40-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-rd7ph\" (UID: \"4359b12c-c2c7-484a-978d-388afe815a40\") " 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rd7ph" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.705074 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/cdcbb504-e2d6-4511-bf24-d18ba641f45b-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-l5kdh\" (UID: \"cdcbb504-e2d6-4511-bf24-d18ba641f45b\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l5kdh" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.705096 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/6a24e010-6ffa-45f6-9b92-99896a2c287f-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-28jpz\" (UID: \"6a24e010-6ffa-45f6-9b92-99896a2c287f\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-28jpz" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.705117 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/05dc53d2-6b17-42e1-b211-43716c5f3037-config\") pod \"service-ca-operator-777779d784-4vp8z\" (UID: \"05dc53d2-6b17-42e1-b211-43716c5f3037\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4vp8z" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.705134 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5dc88be1-84b6-4574-9b80-de53c49a7b1e-node-bootstrap-token\") pod \"machine-config-server-gzp2z\" (UID: \"5dc88be1-84b6-4574-9b80-de53c49a7b1e\") " pod="openshift-machine-config-operator/machine-config-server-gzp2z" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.705154 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/7c4ae147-0445-404f-9c19-bd55e71ceae8-tmpfs\") pod \"packageserver-d55dfcdfc-9tcn7\" (UID: \"7c4ae147-0445-404f-9c19-bd55e71ceae8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9tcn7" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.705172 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2pws8\" (UniqueName: \"kubernetes.io/projected/27a55ec7-3458-4125-aacf-9c6a91b5145b-kube-api-access-2pws8\") pod \"console-operator-58897d9998-cxfwv\" (UID: \"27a55ec7-3458-4125-aacf-9c6a91b5145b\") " pod="openshift-console-operator/console-operator-58897d9998-cxfwv" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.705188 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/05dc53d2-6b17-42e1-b211-43716c5f3037-serving-cert\") pod \"service-ca-operator-777779d784-4vp8z\" (UID: \"05dc53d2-6b17-42e1-b211-43716c5f3037\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4vp8z" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.705207 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5v4qk\" (UniqueName: \"kubernetes.io/projected/cdcbb504-e2d6-4511-bf24-d18ba641f45b-kube-api-access-5v4qk\") pod \"control-plane-machine-set-operator-78cbb6b69f-l5kdh\" (UID: \"cdcbb504-e2d6-4511-bf24-d18ba641f45b\") " 
pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l5kdh" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.705348 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/54b2c6a4-d68f-4e1d-a686-626abdb6e127-stats-auth\") pod \"router-default-5444994796-rtnpd\" (UID: \"54b2c6a4-d68f-4e1d-a686-626abdb6e127\") " pod="openshift-ingress/router-default-5444994796-rtnpd" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.705363 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6a6efd27-be04-47b3-8f2c-fa84e687e4f2-metrics-tls\") pod \"ingress-operator-5b745b69d9-zrtpr\" (UID: \"6a6efd27-be04-47b3-8f2c-fa84e687e4f2\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zrtpr" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.705384 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5dc88be1-84b6-4574-9b80-de53c49a7b1e-certs\") pod \"machine-config-server-gzp2z\" (UID: \"5dc88be1-84b6-4574-9b80-de53c49a7b1e\") " pod="openshift-machine-config-operator/machine-config-server-gzp2z" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.705401 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/1e226353-ea51-4aa1-921b-63a11a769cc7-mountpoint-dir\") pod \"csi-hostpathplugin-k7pxh\" (UID: \"1e226353-ea51-4aa1-921b-63a11a769cc7\") " pod="hostpath-provisioner/csi-hostpathplugin-k7pxh" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.705454 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4wh6v\" (UniqueName: \"kubernetes.io/projected/54b2c6a4-d68f-4e1d-a686-626abdb6e127-kube-api-access-4wh6v\") pod \"router-default-5444994796-rtnpd\" (UID: \"54b2c6a4-d68f-4e1d-a686-626abdb6e127\") " pod="openshift-ingress/router-default-5444994796-rtnpd" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.705475 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9mfsl\" (UniqueName: \"kubernetes.io/projected/6111e5a8-1616-417d-a3d6-d7b1a39ec709-kube-api-access-9mfsl\") pod \"marketplace-operator-79b997595-cwvrg\" (UID: \"6111e5a8-1616-417d-a3d6-d7b1a39ec709\") " pod="openshift-marketplace/marketplace-operator-79b997595-cwvrg" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.705494 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a458fdb3-0662-44a0-8df6-b81dcb66a669-config-volume\") pod \"collect-profiles-29426790-559jq\" (UID: \"a458fdb3-0662-44a0-8df6-b81dcb66a669\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426790-559jq" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.705513 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6a6efd27-be04-47b3-8f2c-fa84e687e4f2-trusted-ca\") pod \"ingress-operator-5b745b69d9-zrtpr\" (UID: \"6a6efd27-be04-47b3-8f2c-fa84e687e4f2\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zrtpr" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.705535 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: 
\"kubernetes.io/host-path/1e226353-ea51-4aa1-921b-63a11a769cc7-socket-dir\") pod \"csi-hostpathplugin-k7pxh\" (UID: \"1e226353-ea51-4aa1-921b-63a11a769cc7\") " pod="hostpath-provisioner/csi-hostpathplugin-k7pxh" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.705554 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/603fdfea-79ed-48f4-89b0-2231fe8fed87-config-volume\") pod \"dns-default-6p4wm\" (UID: \"603fdfea-79ed-48f4-89b0-2231fe8fed87\") " pod="openshift-dns/dns-default-6p4wm" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.705574 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j4rvw\" (UniqueName: \"kubernetes.io/projected/e2896f64-29a2-4d9e-9836-e0abe2a8162f-kube-api-access-j4rvw\") pod \"olm-operator-6b444d44fb-9qv2z\" (UID: \"e2896f64-29a2-4d9e-9836-e0abe2a8162f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9qv2z" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.705594 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dncxg\" (UniqueName: \"kubernetes.io/projected/6a6efd27-be04-47b3-8f2c-fa84e687e4f2-kube-api-access-dncxg\") pod \"ingress-operator-5b745b69d9-zrtpr\" (UID: \"6a6efd27-be04-47b3-8f2c-fa84e687e4f2\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zrtpr" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.705612 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/d37b6448-0b50-4bf9-b345-0feec5f5babb-signing-cabundle\") pod \"service-ca-9c57cc56f-lwkxb\" (UID: \"d37b6448-0b50-4bf9-b345-0feec5f5babb\") " pod="openshift-service-ca/service-ca-9c57cc56f-lwkxb" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.705638 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/1e226353-ea51-4aa1-921b-63a11a769cc7-plugins-dir\") pod \"csi-hostpathplugin-k7pxh\" (UID: \"1e226353-ea51-4aa1-921b-63a11a769cc7\") " pod="hostpath-provisioner/csi-hostpathplugin-k7pxh" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.705658 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6nxxq\" (UniqueName: \"kubernetes.io/projected/05dc53d2-6b17-42e1-b211-43716c5f3037-kube-api-access-6nxxq\") pod \"service-ca-operator-777779d784-4vp8z\" (UID: \"05dc53d2-6b17-42e1-b211-43716c5f3037\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4vp8z" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.705683 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/1e226353-ea51-4aa1-921b-63a11a769cc7-csi-data-dir\") pod \"csi-hostpathplugin-k7pxh\" (UID: \"1e226353-ea51-4aa1-921b-63a11a769cc7\") " pod="hostpath-provisioner/csi-hostpathplugin-k7pxh" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.705837 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/1e226353-ea51-4aa1-921b-63a11a769cc7-csi-data-dir\") pod \"csi-hostpathplugin-k7pxh\" (UID: \"1e226353-ea51-4aa1-921b-63a11a769cc7\") " pod="hostpath-provisioner/csi-hostpathplugin-k7pxh" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.705849 5048 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-dp45t\" (UniqueName: \"kubernetes.io/projected/2423ea84-11dc-47d0-8d1d-1ce24260f02e-kube-api-access-dp45t\") pod \"multus-admission-controller-857f4d67dd-895rs\" (UID: \"2423ea84-11dc-47d0-8d1d-1ce24260f02e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-895rs" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.706534 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/54b2c6a4-d68f-4e1d-a686-626abdb6e127-service-ca-bundle\") pod \"router-default-5444994796-rtnpd\" (UID: \"54b2c6a4-d68f-4e1d-a686-626abdb6e127\") " pod="openshift-ingress/router-default-5444994796-rtnpd" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.706805 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/05dc53d2-6b17-42e1-b211-43716c5f3037-config\") pod \"service-ca-operator-777779d784-4vp8z\" (UID: \"05dc53d2-6b17-42e1-b211-43716c5f3037\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4vp8z" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.707177 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/1e226353-ea51-4aa1-921b-63a11a769cc7-registration-dir\") pod \"csi-hostpathplugin-k7pxh\" (UID: \"1e226353-ea51-4aa1-921b-63a11a769cc7\") " pod="hostpath-provisioner/csi-hostpathplugin-k7pxh" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.708562 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/d37b6448-0b50-4bf9-b345-0feec5f5babb-signing-cabundle\") pod \"service-ca-9c57cc56f-lwkxb\" (UID: \"d37b6448-0b50-4bf9-b345-0feec5f5babb\") " pod="openshift-service-ca/service-ca-9c57cc56f-lwkxb" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.710102 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a458fdb3-0662-44a0-8df6-b81dcb66a669-config-volume\") pod \"collect-profiles-29426790-559jq\" (UID: \"a458fdb3-0662-44a0-8df6-b81dcb66a669\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426790-559jq" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.710649 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/7c4ae147-0445-404f-9c19-bd55e71ceae8-tmpfs\") pod \"packageserver-d55dfcdfc-9tcn7\" (UID: \"7c4ae147-0445-404f-9c19-bd55e71ceae8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9tcn7" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.711861 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/1e226353-ea51-4aa1-921b-63a11a769cc7-plugins-dir\") pod \"csi-hostpathplugin-k7pxh\" (UID: \"1e226353-ea51-4aa1-921b-63a11a769cc7\") " pod="hostpath-provisioner/csi-hostpathplugin-k7pxh" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.712009 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/1e226353-ea51-4aa1-921b-63a11a769cc7-mountpoint-dir\") pod \"csi-hostpathplugin-k7pxh\" (UID: \"1e226353-ea51-4aa1-921b-63a11a769cc7\") " pod="hostpath-provisioner/csi-hostpathplugin-k7pxh" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.712238 5048 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/1e226353-ea51-4aa1-921b-63a11a769cc7-socket-dir\") pod \"csi-hostpathplugin-k7pxh\" (UID: \"1e226353-ea51-4aa1-921b-63a11a769cc7\") " pod="hostpath-provisioner/csi-hostpathplugin-k7pxh" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.712943 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6a6efd27-be04-47b3-8f2c-fa84e687e4f2-trusted-ca\") pod \"ingress-operator-5b745b69d9-zrtpr\" (UID: \"6a6efd27-be04-47b3-8f2c-fa84e687e4f2\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zrtpr" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.712953 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/603fdfea-79ed-48f4-89b0-2231fe8fed87-config-volume\") pod \"dns-default-6p4wm\" (UID: \"603fdfea-79ed-48f4-89b0-2231fe8fed87\") " pod="openshift-dns/dns-default-6p4wm" Dec 13 06:31:50 crc kubenswrapper[5048]: E1213 06:31:50.715386 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:31:51.215363261 +0000 UTC m=+145.081957842 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.716666 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5dc88be1-84b6-4574-9b80-de53c49a7b1e-node-bootstrap-token\") pod \"machine-config-server-gzp2z\" (UID: \"5dc88be1-84b6-4574-9b80-de53c49a7b1e\") " pod="openshift-machine-config-operator/machine-config-server-gzp2z" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.721247 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/e2896f64-29a2-4d9e-9836-e0abe2a8162f-srv-cert\") pod \"olm-operator-6b444d44fb-9qv2z\" (UID: \"e2896f64-29a2-4d9e-9836-e0abe2a8162f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9qv2z" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.721774 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/6a24e010-6ffa-45f6-9b92-99896a2c287f-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-28jpz\" (UID: \"6a24e010-6ffa-45f6-9b92-99896a2c287f\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-28jpz" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.722110 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4359b12c-c2c7-484a-978d-388afe815a40-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-rd7ph\" (UID: \"4359b12c-c2c7-484a-978d-388afe815a40\") " 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rd7ph" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.722646 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c4d0beae-b187-40c3-bb35-aac05eec25b0-cert\") pod \"ingress-canary-6zbpx\" (UID: \"c4d0beae-b187-40c3-bb35-aac05eec25b0\") " pod="openshift-ingress-canary/ingress-canary-6zbpx" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.723869 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/54b2c6a4-d68f-4e1d-a686-626abdb6e127-default-certificate\") pod \"router-default-5444994796-rtnpd\" (UID: \"54b2c6a4-d68f-4e1d-a686-626abdb6e127\") " pod="openshift-ingress/router-default-5444994796-rtnpd" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.725509 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6111e5a8-1616-417d-a3d6-d7b1a39ec709-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-cwvrg\" (UID: \"6111e5a8-1616-417d-a3d6-d7b1a39ec709\") " pod="openshift-marketplace/marketplace-operator-79b997595-cwvrg" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.727419 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6111e5a8-1616-417d-a3d6-d7b1a39ec709-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-cwvrg\" (UID: \"6111e5a8-1616-417d-a3d6-d7b1a39ec709\") " pod="openshift-marketplace/marketplace-operator-79b997595-cwvrg" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.730560 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/05dc53d2-6b17-42e1-b211-43716c5f3037-serving-cert\") pod \"service-ca-operator-777779d784-4vp8z\" (UID: \"05dc53d2-6b17-42e1-b211-43716c5f3037\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4vp8z" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.731690 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/e2896f64-29a2-4d9e-9836-e0abe2a8162f-profile-collector-cert\") pod \"olm-operator-6b444d44fb-9qv2z\" (UID: \"e2896f64-29a2-4d9e-9836-e0abe2a8162f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9qv2z" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.732317 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a458fdb3-0662-44a0-8df6-b81dcb66a669-secret-volume\") pod \"collect-profiles-29426790-559jq\" (UID: \"a458fdb3-0662-44a0-8df6-b81dcb66a669\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426790-559jq" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.734101 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6a6efd27-be04-47b3-8f2c-fa84e687e4f2-metrics-tls\") pod \"ingress-operator-5b745b69d9-zrtpr\" (UID: \"6a6efd27-be04-47b3-8f2c-fa84e687e4f2\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zrtpr" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.735234 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: 
\"kubernetes.io/secret/5dc88be1-84b6-4574-9b80-de53c49a7b1e-certs\") pod \"machine-config-server-gzp2z\" (UID: \"5dc88be1-84b6-4574-9b80-de53c49a7b1e\") " pod="openshift-machine-config-operator/machine-config-server-gzp2z" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.737106 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/3a66da44-fbdf-4676-8250-0b146b67a5e9-srv-cert\") pod \"catalog-operator-68c6474976-r8m8l\" (UID: \"3a66da44-fbdf-4676-8250-0b146b67a5e9\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r8m8l" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.737338 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7c4ae147-0445-404f-9c19-bd55e71ceae8-apiservice-cert\") pod \"packageserver-d55dfcdfc-9tcn7\" (UID: \"7c4ae147-0445-404f-9c19-bd55e71ceae8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9tcn7" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.738245 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/3a66da44-fbdf-4676-8250-0b146b67a5e9-profile-collector-cert\") pod \"catalog-operator-68c6474976-r8m8l\" (UID: \"3a66da44-fbdf-4676-8250-0b146b67a5e9\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r8m8l" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.740584 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/54b2c6a4-d68f-4e1d-a686-626abdb6e127-stats-auth\") pod \"router-default-5444994796-rtnpd\" (UID: \"54b2c6a4-d68f-4e1d-a686-626abdb6e127\") " pod="openshift-ingress/router-default-5444994796-rtnpd" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.743674 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/54b2c6a4-d68f-4e1d-a686-626abdb6e127-metrics-certs\") pod \"router-default-5444994796-rtnpd\" (UID: \"54b2c6a4-d68f-4e1d-a686-626abdb6e127\") " pod="openshift-ingress/router-default-5444994796-rtnpd" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.744300 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7c4ae147-0445-404f-9c19-bd55e71ceae8-webhook-cert\") pod \"packageserver-d55dfcdfc-9tcn7\" (UID: \"7c4ae147-0445-404f-9c19-bd55e71ceae8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9tcn7" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.748994 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/cdcbb504-e2d6-4511-bf24-d18ba641f45b-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-l5kdh\" (UID: \"cdcbb504-e2d6-4511-bf24-d18ba641f45b\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l5kdh" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.752700 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/d37b6448-0b50-4bf9-b345-0feec5f5babb-signing-key\") pod \"service-ca-9c57cc56f-lwkxb\" (UID: \"d37b6448-0b50-4bf9-b345-0feec5f5babb\") " pod="openshift-service-ca/service-ca-9c57cc56f-lwkxb" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 
06:31:50.777758 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jt626\" (UniqueName: \"kubernetes.io/projected/4b48e4b5-ade1-4838-8866-692179d8418a-kube-api-access-jt626\") pod \"openshift-controller-manager-operator-756b6f6bc6-t2n95\" (UID: \"4b48e4b5-ade1-4838-8866-692179d8418a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-t2n95" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.784327 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-t2n95" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.787531 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/36ee331b-baa0-42ac-9bd3-7c52253814e1-bound-sa-token\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.793187 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jfwjt\" (UniqueName: \"kubernetes.io/projected/c5a72af5-cc5a-4d16-acc9-2126107c6b6c-kube-api-access-jfwjt\") pod \"machine-config-operator-74547568cd-m4qpv\" (UID: \"c5a72af5-cc5a-4d16-acc9-2126107c6b6c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-m4qpv" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.802081 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-w5fph" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.804083 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz" event={"ID":"48280256-4ec4-4530-a2f1-8bee7c6b4871","Type":"ContainerStarted","Data":"4bb591e0b0114cd68ba2aaa7e19ddee5901aba3b7548535f4ed7b3a755a92223"} Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.807329 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:31:50 crc kubenswrapper[5048]: E1213 06:31:50.807903 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:31:51.307871052 +0000 UTC m=+145.174465813 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.808804 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-x5sc8" event={"ID":"81dcc1e9-b7ab-4d9e-8a4a-6bdc087877a4","Type":"ContainerStarted","Data":"bc84d6cbed8b5fae9332ea55ff795032eb1e6d0fd8fc01cf99d77028d6b68f49"} Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.811162 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p6skd\" (UniqueName: \"kubernetes.io/projected/2754f9a4-6375-4864-9d69-7674e3dfe490-kube-api-access-p6skd\") pod \"route-controller-manager-6576b87f9c-lzrgn\" (UID: \"2754f9a4-6375-4864-9d69-7674e3dfe490\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lzrgn" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.815873 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-svwnq" event={"ID":"b5ed0932-7c88-4245-9403-8a0e72659f59","Type":"ContainerStarted","Data":"c955a7d72781f492e759bbc1320d7b3f4684214e6027fe709261391fbe07b13f"} Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.820775 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-75h2c" event={"ID":"3f047ff9-f271-4f18-935e-f811afd75852","Type":"ContainerStarted","Data":"2e7ca2d19cdad66801827e506fc98ae5e10fd9ca602ba57725b4ac6bbd4b6853"} Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.822813 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-7w8rw" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.822922 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgc8n" event={"ID":"4fe5a2bb-7e2b-4cf4-bb1b-a0be5c9027e3","Type":"ContainerStarted","Data":"04785b6a8e9cd06351d05f93703dddcdbdfa809ffaba1d93079d5ec32065157a"} Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.826112 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7ch79" event={"ID":"bfeb748d-a196-4696-a599-70a6386cb89b","Type":"ContainerStarted","Data":"56abe007ede75c9e3e7049f61cfafcc4585d5dcefd7d3e33dc496b2379906e38"} Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.827886 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-qcdrc" event={"ID":"6567f364-4ab2-489b-837f-1e7c194f311d","Type":"ContainerStarted","Data":"940dfe480ca8d9d07ac51c5b1f83efb6c9ec2af812cc6942e7b653db1597b22b"} Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.833632 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-m4qpv" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.849356 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pjnkt\" (UniqueName: \"kubernetes.io/projected/c4d0beae-b187-40c3-bb35-aac05eec25b0-kube-api-access-pjnkt\") pod \"ingress-canary-6zbpx\" (UID: \"c4d0beae-b187-40c3-bb35-aac05eec25b0\") " pod="openshift-ingress-canary/ingress-canary-6zbpx" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.867383 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v9njl\" (UniqueName: \"kubernetes.io/projected/4359b12c-c2c7-484a-978d-388afe815a40-kube-api-access-v9njl\") pod \"kube-storage-version-migrator-operator-b67b599dd-rd7ph\" (UID: \"4359b12c-c2c7-484a-978d-388afe815a40\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rd7ph" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.872818 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8gchm" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.894082 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qgpcl\" (UniqueName: \"kubernetes.io/projected/3a66da44-fbdf-4676-8250-0b146b67a5e9-kube-api-access-qgpcl\") pod \"catalog-operator-68c6474976-r8m8l\" (UID: \"3a66da44-fbdf-4676-8250-0b146b67a5e9\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r8m8l" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.909708 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:50 crc kubenswrapper[5048]: E1213 06:31:50.911115 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:31:51.411094599 +0000 UTC m=+145.277689180 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.920776 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gzz8n\" (UniqueName: \"kubernetes.io/projected/d37b6448-0b50-4bf9-b345-0feec5f5babb-kube-api-access-gzz8n\") pod \"service-ca-9c57cc56f-lwkxb\" (UID: \"d37b6448-0b50-4bf9-b345-0feec5f5babb\") " pod="openshift-service-ca/service-ca-9c57cc56f-lwkxb" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.929250 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9mfsl\" (UniqueName: \"kubernetes.io/projected/6111e5a8-1616-417d-a3d6-d7b1a39ec709-kube-api-access-9mfsl\") pod \"marketplace-operator-79b997595-cwvrg\" (UID: \"6111e5a8-1616-417d-a3d6-d7b1a39ec709\") " pod="openshift-marketplace/marketplace-operator-79b997595-cwvrg" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.937392 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/36ee331b-baa0-42ac-9bd3-7c52253814e1-installation-pull-secrets\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.938747 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4359b12c-c2c7-484a-978d-388afe815a40-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-rd7ph\" (UID: \"4359b12c-c2c7-484a-978d-388afe815a40\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rd7ph" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.938764 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kbc8x\" (UniqueName: \"kubernetes.io/projected/36ee331b-baa0-42ac-9bd3-7c52253814e1-kube-api-access-kbc8x\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.940665 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/603fdfea-79ed-48f4-89b0-2231fe8fed87-metrics-tls\") pod \"dns-default-6p4wm\" (UID: \"603fdfea-79ed-48f4-89b0-2231fe8fed87\") " pod="openshift-dns/dns-default-6p4wm" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.942514 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/27a55ec7-3458-4125-aacf-9c6a91b5145b-trusted-ca\") pod \"console-operator-58897d9998-cxfwv\" (UID: \"27a55ec7-3458-4125-aacf-9c6a91b5145b\") " pod="openshift-console-operator/console-operator-58897d9998-cxfwv" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.943023 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vmrsg\" (UniqueName: 
\"kubernetes.io/projected/493b8318-26e3-4f4e-b5d1-e1b2fd57de35-kube-api-access-vmrsg\") pod \"cluster-samples-operator-665b6dd947-5qgj6\" (UID: \"493b8318-26e3-4f4e-b5d1-e1b2fd57de35\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5qgj6" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.944373 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27a55ec7-3458-4125-aacf-9c6a91b5145b-config\") pod \"console-operator-58897d9998-cxfwv\" (UID: \"27a55ec7-3458-4125-aacf-9c6a91b5145b\") " pod="openshift-console-operator/console-operator-58897d9998-cxfwv" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.949002 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r8m8l" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.957505 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/27a55ec7-3458-4125-aacf-9c6a91b5145b-serving-cert\") pod \"console-operator-58897d9998-cxfwv\" (UID: \"27a55ec7-3458-4125-aacf-9c6a91b5145b\") " pod="openshift-console-operator/console-operator-58897d9998-cxfwv" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.958304 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j4rvw\" (UniqueName: \"kubernetes.io/projected/e2896f64-29a2-4d9e-9836-e0abe2a8162f-kube-api-access-j4rvw\") pod \"olm-operator-6b444d44fb-9qv2z\" (UID: \"e2896f64-29a2-4d9e-9836-e0abe2a8162f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9qv2z" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.965656 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-895rs" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.973291 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dncxg\" (UniqueName: \"kubernetes.io/projected/6a6efd27-be04-47b3-8f2c-fa84e687e4f2-kube-api-access-dncxg\") pod \"ingress-operator-5b745b69d9-zrtpr\" (UID: \"6a6efd27-be04-47b3-8f2c-fa84e687e4f2\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zrtpr" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.978194 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-cwvrg" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.984474 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lzrgn" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.998561 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2pws8\" (UniqueName: \"kubernetes.io/projected/27a55ec7-3458-4125-aacf-9c6a91b5145b-kube-api-access-2pws8\") pod \"console-operator-58897d9998-cxfwv\" (UID: \"27a55ec7-3458-4125-aacf-9c6a91b5145b\") " pod="openshift-console-operator/console-operator-58897d9998-cxfwv" Dec 13 06:31:50 crc kubenswrapper[5048]: I1213 06:31:50.998911 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rd7ph" Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.004485 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6nxxq\" (UniqueName: \"kubernetes.io/projected/05dc53d2-6b17-42e1-b211-43716c5f3037-kube-api-access-6nxxq\") pod \"service-ca-operator-777779d784-4vp8z\" (UID: \"05dc53d2-6b17-42e1-b211-43716c5f3037\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4vp8z" Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.012006 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:31:51 crc kubenswrapper[5048]: E1213 06:31:51.012370 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:31:51.512352884 +0000 UTC m=+145.378947465 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.018423 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-vtxwp"] Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.030904 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-g9bxc"] Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.037710 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5v4qk\" (UniqueName: \"kubernetes.io/projected/cdcbb504-e2d6-4511-bf24-d18ba641f45b-kube-api-access-5v4qk\") pod \"control-plane-machine-set-operator-78cbb6b69f-l5kdh\" (UID: \"cdcbb504-e2d6-4511-bf24-d18ba641f45b\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l5kdh" Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.044797 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4wh6v\" (UniqueName: \"kubernetes.io/projected/54b2c6a4-d68f-4e1d-a686-626abdb6e127-kube-api-access-4wh6v\") pod \"router-default-5444994796-rtnpd\" (UID: \"54b2c6a4-d68f-4e1d-a686-626abdb6e127\") " pod="openshift-ingress/router-default-5444994796-rtnpd" Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.048001 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-6zbpx" Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.070936 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qg9l8\" (UniqueName: \"kubernetes.io/projected/9d2b0c42-857f-4f52-8742-02c7ee9a9fd3-kube-api-access-qg9l8\") pod \"migrator-59844c95c7-xkmc7\" (UID: \"9d2b0c42-857f-4f52-8742-02c7ee9a9fd3\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-xkmc7" Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.113286 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:51 crc kubenswrapper[5048]: E1213 06:31:51.113864 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:31:51.613848395 +0000 UTC m=+145.480442976 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.131248 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dkhlf\" (UniqueName: \"kubernetes.io/projected/7c4ae147-0445-404f-9c19-bd55e71ceae8-kube-api-access-dkhlf\") pod \"packageserver-d55dfcdfc-9tcn7\" (UID: \"7c4ae147-0445-404f-9c19-bd55e71ceae8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9tcn7" Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.142019 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-cxfwv" Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.147346 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6pjjk\" (UniqueName: \"kubernetes.io/projected/603fdfea-79ed-48f4-89b0-2231fe8fed87-kube-api-access-6pjjk\") pod \"dns-default-6p4wm\" (UID: \"603fdfea-79ed-48f4-89b0-2231fe8fed87\") " pod="openshift-dns/dns-default-6p4wm" Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.149141 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-5nt4n"] Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.150088 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2vljm\" (UniqueName: \"kubernetes.io/projected/1e226353-ea51-4aa1-921b-63a11a769cc7-kube-api-access-2vljm\") pod \"csi-hostpathplugin-k7pxh\" (UID: \"1e226353-ea51-4aa1-921b-63a11a769cc7\") " pod="hostpath-provisioner/csi-hostpathplugin-k7pxh" Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.151833 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-rtnpd" Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.163840 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-4vp8z" Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.164468 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x7mqm\" (UniqueName: \"kubernetes.io/projected/6a24e010-6ffa-45f6-9b92-99896a2c287f-kube-api-access-x7mqm\") pod \"package-server-manager-789f6589d5-28jpz\" (UID: \"6a24e010-6ffa-45f6-9b92-99896a2c287f\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-28jpz" Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.173347 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6a6efd27-be04-47b3-8f2c-fa84e687e4f2-bound-sa-token\") pod \"ingress-operator-5b745b69d9-zrtpr\" (UID: \"6a6efd27-be04-47b3-8f2c-fa84e687e4f2\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zrtpr" Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.180007 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zrtpr" Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.195905 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9tcn7" Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.199180 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-lwkxb" Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.207182 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-t2n95"] Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.215247 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:31:51 crc kubenswrapper[5048]: E1213 06:31:51.217917 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:31:51.717888263 +0000 UTC m=+145.584482844 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.221618 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l5kdh" Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.229987 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vrh4r\" (UniqueName: \"kubernetes.io/projected/5dc88be1-84b6-4574-9b80-de53c49a7b1e-kube-api-access-vrh4r\") pod \"machine-config-server-gzp2z\" (UID: \"5dc88be1-84b6-4574-9b80-de53c49a7b1e\") " pod="openshift-machine-config-operator/machine-config-server-gzp2z" Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.232147 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5qgj6" Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.232729 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9qv2z" Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.233016 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-dwk4m"] Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.236097 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d5v67\" (UniqueName: \"kubernetes.io/projected/a458fdb3-0662-44a0-8df6-b81dcb66a669-kube-api-access-d5v67\") pod \"collect-profiles-29426790-559jq\" (UID: \"a458fdb3-0662-44a0-8df6-b81dcb66a669\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426790-559jq" Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.236958 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-x4k67"] Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.240120 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-xclb7"] Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.250557 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-w5fph"] Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.267011 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-28jpz" Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.270935 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-7pgnt"] Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.289931 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-xkmc7" Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.309290 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-6p4wm" Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.318928 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:51 crc kubenswrapper[5048]: E1213 06:31:51.319501 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:31:51.819484247 +0000 UTC m=+145.686078828 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:31:51 crc kubenswrapper[5048]: W1213 06:31:51.331568 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8b20de69_950c_42d1_b967_d0fc59d035cd.slice/crio-b32a0930a54a83a67713aee5c8a64698ce9b62cb4d5810682bcf3c206db6e21b WatchSource:0}: Error finding container b32a0930a54a83a67713aee5c8a64698ce9b62cb4d5810682bcf3c206db6e21b: Status 404 returned error can't find the container with id b32a0930a54a83a67713aee5c8a64698ce9b62cb4d5810682bcf3c206db6e21b Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.335984 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-k7pxh" Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.361930 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-gzp2z" Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.373576 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-hxskn"] Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.423969 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:31:51 crc kubenswrapper[5048]: E1213 06:31:51.424416 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:31:51.924396119 +0000 UTC m=+145.790990700 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.431133 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-7w8rw"] Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.510676 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29426790-559jq" Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.526094 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:51 crc kubenswrapper[5048]: E1213 06:31:51.526564 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:31:52.026534477 +0000 UTC m=+145.893129058 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.628468 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:31:51 crc kubenswrapper[5048]: E1213 06:31:51.628689 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:31:52.128642993 +0000 UTC m=+145.995237574 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.629069 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:51 crc kubenswrapper[5048]: E1213 06:31:51.629709 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:31:52.129681221 +0000 UTC m=+145.996275802 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.730742 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:31:51 crc kubenswrapper[5048]: E1213 06:31:51.731359 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:31:52.231336457 +0000 UTC m=+146.097931038 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.780315 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-m4qpv"] Dec 13 06:31:51 crc kubenswrapper[5048]: W1213 06:31:51.789152 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod058c0262_48b0_4176_9f52_20de42c46477.slice/crio-6665aabec03eb85eceb7a5f1e7ce5f8eb7aaf80beb3362bc8aac23c337fd1322 WatchSource:0}: Error finding container 6665aabec03eb85eceb7a5f1e7ce5f8eb7aaf80beb3362bc8aac23c337fd1322: Status 404 returned error can't find the container with id 6665aabec03eb85eceb7a5f1e7ce5f8eb7aaf80beb3362bc8aac23c337fd1322 Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.833083 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:51 crc kubenswrapper[5048]: E1213 06:31:51.833607 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:31:52.333591458 +0000 UTC m=+146.200186039 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.867478 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-7w8rw" event={"ID":"6365e1b5-5ddc-4f83-8388-6ffc37bf18e2","Type":"ContainerStarted","Data":"c7f1eadec777e38c3226072b510518148bba79beb63b6e745867bcc27b1c0b9e"} Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.868979 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-g9bxc" event={"ID":"b9c10fa1-af81-4b42-9c40-55c4a7aa6703","Type":"ContainerStarted","Data":"278c2c6026026adb7208c3a7943d3dce11712b42090d8f859a1f3cfca2ffb956"} Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.870388 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz" event={"ID":"48280256-4ec4-4530-a2f1-8bee7c6b4871","Type":"ContainerStarted","Data":"93487ca41ceb8dc270a20d57a016e0f23c9540b45939a094c8687d5381da4cf1"} Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.871190 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" event={"ID":"aa6cce4c-6bc2-469b-9062-e928744616db","Type":"ContainerStarted","Data":"47a0435c629513e5dd679c6384ff3ae93abdaf8547d488317e60f8a02abc7096"} Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.872061 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-7pgnt" event={"ID":"ab459e43-1b40-481c-901c-20344fb51434","Type":"ContainerStarted","Data":"ef914f2e4622c45c4940ee466ce355a4743f1f32ca474965dd585ab06de6d023"} Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.873123 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-xclb7" event={"ID":"f567d62b-7941-466c-84c8-06f6854000ba","Type":"ContainerStarted","Data":"07e2505ce2829ed065f73ebddc67b615496befc5b46c0463a06c04fa886b13d7"} Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.873922 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-hxskn" event={"ID":"058c0262-48b0-4176-9f52-20de42c46477","Type":"ContainerStarted","Data":"6665aabec03eb85eceb7a5f1e7ce5f8eb7aaf80beb3362bc8aac23c337fd1322"} Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.878604 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-t2n95" event={"ID":"4b48e4b5-ade1-4838-8866-692179d8418a","Type":"ContainerStarted","Data":"170f0de57b3194ae9ad57ab95829c9c504d14be65382de529147aeeda984498f"} Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.882153 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-w5fph" event={"ID":"d0451ecc-c881-4b85-a6af-63796033ef83","Type":"ContainerStarted","Data":"3c0fceeee4c3302c14228bb01917fb5123ae3aa447c1cc27fc45278d8a7d087b"} Dec 13 06:31:51 crc 
kubenswrapper[5048]: I1213 06:31:51.888055 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-5nt4n" event={"ID":"1de0ebfd-b283-4790-badb-fb78d80e6703","Type":"ContainerStarted","Data":"fa1086991ce8c29dd86f25da1668d167842b5855915f0f245a829ac74751b99d"} Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.896210 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-vtxwp" event={"ID":"5a408a68-6f27-4655-a067-3d2b08ad5a7d","Type":"ContainerStarted","Data":"15c1e9b7caeab6b8e9bb9c0f627af3d304da8a3e433b9cf18a62e944ce15ed65"} Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.934839 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:31:51 crc kubenswrapper[5048]: E1213 06:31:51.935568 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:31:52.435527791 +0000 UTC m=+146.302122382 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.944595 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-x4k67" event={"ID":"8b20de69-950c-42d1-b967-d0fc59d035cd","Type":"ContainerStarted","Data":"b32a0930a54a83a67713aee5c8a64698ce9b62cb4d5810682bcf3c206db6e21b"} Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.945659 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-qcdrc" Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.974926 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-svwnq" podStartSLOduration=122.974901212 podStartE2EDuration="2m2.974901212s" podCreationTimestamp="2025-12-13 06:29:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:31:51.971425249 +0000 UTC m=+145.838019840" watchObservedRunningTime="2025-12-13 06:31:51.974901212 +0000 UTC m=+145.841495793" Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.983667 5048 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-qcdrc container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/healthz\": dial tcp 10.217.0.12:8443: connect: connection refused" start-of-body= Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.984307 5048 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-controller-manager/controller-manager-879f6c89f-qcdrc" podUID="6567f364-4ab2-489b-837f-1e7c194f311d" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.12:8443/healthz\": dial tcp 10.217.0.12:8443: connect: connection refused" Dec 13 06:31:51 crc kubenswrapper[5048]: I1213 06:31:51.993468 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8gchm"] Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.033314 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-qcdrc" podStartSLOduration=122.033285551 podStartE2EDuration="2m2.033285551s" podCreationTimestamp="2025-12-13 06:29:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:31:52.027424204 +0000 UTC m=+145.894018795" watchObservedRunningTime="2025-12-13 06:31:52.033285551 +0000 UTC m=+145.899880132" Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.041292 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:52 crc kubenswrapper[5048]: E1213 06:31:52.043466 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:31:52.543446223 +0000 UTC m=+146.410040804 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.064602 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-cwvrg"] Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.073129 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rd7ph"] Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.080931 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-895rs"] Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.082672 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r8m8l"] Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.084474 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5qgj6"] Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.095180 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-6zbpx"] Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.095224 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-lzrgn"] Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.126334 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l5kdh"] Dec 13 06:31:52 crc kubenswrapper[5048]: W1213 06:31:52.131370 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc5a72af5_cc5a_4d16_acc9_2126107c6b6c.slice/crio-4e67ef95b8eeda78a2dc61faf9faa3b7454118880454c39aabe8c024b771620b WatchSource:0}: Error finding container 4e67ef95b8eeda78a2dc61faf9faa3b7454118880454c39aabe8c024b771620b: Status 404 returned error can't find the container with id 4e67ef95b8eeda78a2dc61faf9faa3b7454118880454c39aabe8c024b771620b Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.141340 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-lwkxb"] Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.143133 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:31:52 crc kubenswrapper[5048]: E1213 06:31:52.143619 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:31:52.643599787 +0000 UTC m=+146.510194368 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.244580 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:52 crc kubenswrapper[5048]: E1213 06:31:52.245084 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:31:52.745054997 +0000 UTC m=+146.611649578 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.347797 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:31:52 crc kubenswrapper[5048]: E1213 06:31:52.348233 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:31:52.848194932 +0000 UTC m=+146.714789533 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.348898 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:52 crc kubenswrapper[5048]: E1213 06:31:52.349519 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:31:52.849501386 +0000 UTC m=+146.716095967 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.382567 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-cxfwv"] Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.453147 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:31:52 crc kubenswrapper[5048]: E1213 06:31:52.453326 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:31:52.953296318 +0000 UTC m=+146.819890899 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.453568 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:52 crc kubenswrapper[5048]: E1213 06:31:52.453938 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:31:52.953929506 +0000 UTC m=+146.820524087 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.458408 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-4vp8z"] Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.461671 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-zrtpr"] Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.463540 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-6p4wm"] Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.474261 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-xkmc7"] Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.499498 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29426790-559jq"] Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.560359 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:31:52 crc kubenswrapper[5048]: E1213 06:31:52.561028 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:31:53.061007465 +0000 UTC m=+146.927602046 (durationBeforeRetry 500ms). 
Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.949287 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8gchm" event={"ID":"2e941413-0d6f-4fe7-bfc1-218b8bc6d026","Type":"ContainerStarted","Data":"75895cd32d5ff71dce351e41253d7c502279248c2cdcc10e753f2595ffac3935"}
Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.950256 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r8m8l" event={"ID":"3a66da44-fbdf-4676-8250-0b146b67a5e9","Type":"ContainerStarted","Data":"8067200b2c6154f237c1a362f97d86ff4e73683910f346c550b661d822111232"}
Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.951512 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lzrgn" event={"ID":"2754f9a4-6375-4864-9d69-7674e3dfe490","Type":"ContainerStarted","Data":"58d12ee173e188ce135ebf858a55523a9dd1de59bfb436270b14b5c2f7e655da"}
Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.952654 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rd7ph" event={"ID":"4359b12c-c2c7-484a-978d-388afe815a40","Type":"ContainerStarted","Data":"9863eb960c12ec5674914ac9be142ef5644d6350074b181fc2359f3463bd2710"}
Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.954657 5048 generic.go:334] "Generic (PLEG): container finished" podID="48280256-4ec4-4530-a2f1-8bee7c6b4871" containerID="93487ca41ceb8dc270a20d57a016e0f23c9540b45939a094c8687d5381da4cf1" exitCode=0
Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.954729 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz" event={"ID":"48280256-4ec4-4530-a2f1-8bee7c6b4871","Type":"ContainerDied","Data":"93487ca41ceb8dc270a20d57a016e0f23c9540b45939a094c8687d5381da4cf1"}
Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.956391 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-75h2c" event={"ID":"3f047ff9-f271-4f18-935e-f811afd75852","Type":"ContainerStarted","Data":"1bae6eed3003efaa2d955caecd13e8673d712d5774db9c17aad4791fce1c3e6a"}
Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.958140 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgc8n" event={"ID":"4fe5a2bb-7e2b-4cf4-bb1b-a0be5c9027e3","Type":"ContainerStarted","Data":"e4bbfda5397ab4673264d7d45c20c4c7d387db12c6b16826a60712612e4b5c3e"}
Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.959257 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-6zbpx" event={"ID":"c4d0beae-b187-40c3-bb35-aac05eec25b0","Type":"ContainerStarted","Data":"f00abb7e8250a2a2eaced5ddb8e51772570bd5355f03c95d9405894751e51ccc"}
Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.960349 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-m4qpv" event={"ID":"c5a72af5-cc5a-4d16-acc9-2126107c6b6c","Type":"ContainerStarted","Data":"4e67ef95b8eeda78a2dc61faf9faa3b7454118880454c39aabe8c024b771620b"}
Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.961748 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-cwvrg" event={"ID":"6111e5a8-1616-417d-a3d6-d7b1a39ec709","Type":"ContainerStarted","Data":"36d36ffee34eb72a42c3be3d5ad7496d80960581f5fece4280371c17bc4e457a"}
Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.962617 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-895rs" event={"ID":"2423ea84-11dc-47d0-8d1d-1ce24260f02e","Type":"ContainerStarted","Data":"57f3a6eff4c6d36facdf26eb3addcd4d2f0863db2d0d1c3aab5f4403583a0d75"}
Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.963498 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-rtnpd" event={"ID":"54b2c6a4-d68f-4e1d-a686-626abdb6e127","Type":"ContainerStarted","Data":"5b925c369c055b82ce3f88a8fd207a932e3bf724a800757d52904de450903f0d"}
Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.965215 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-x5sc8" event={"ID":"81dcc1e9-b7ab-4d9e-8a4a-6bdc087877a4","Type":"ContainerStarted","Data":"5d251a4df50f418f696ace659023ba0611884f1127b12f8a1f4e44f34393ed93"}
Dec 13 06:31:52 crc kubenswrapper[5048]: I1213 06:31:52.972097 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-qcdrc"
Dec 13 06:31:53 crc kubenswrapper[5048]: I1213 06:31:53.463625 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-28jpz"]
Dec 13 06:31:53 crc kubenswrapper[5048]: I1213 06:31:53.466661 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-k7pxh"]
Dec 13 06:31:53 crc kubenswrapper[5048]: I1213 06:31:53.468888 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9qv2z"]
Dec 13 06:31:53 crc kubenswrapper[5048]: W1213 06:31:53.470215 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcdcbb504_e2d6_4511_bf24_d18ba641f45b.slice/crio-1a7bdc09641c4f58da3d6137ab174c0c2be9c45a97d5550c3ed48344ee276b01 WatchSource:0}: Error finding container 1a7bdc09641c4f58da3d6137ab174c0c2be9c45a97d5550c3ed48344ee276b01: Status 404 returned error can't find the container with id 1a7bdc09641c4f58da3d6137ab174c0c2be9c45a97d5550c3ed48344ee276b01
Dec 13 06:31:53 crc kubenswrapper[5048]: W1213 06:31:53.473276 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd37b6448_0b50_4bf9_b345_0feec5f5babb.slice/crio-eb7c172e8d26dd067ec433276ee764545376264c09a6f7c4c5bc390d60eb8819 WatchSource:0}: Error finding container eb7c172e8d26dd067ec433276ee764545376264c09a6f7c4c5bc390d60eb8819: Status 404 returned error can't find the container with id eb7c172e8d26dd067ec433276ee764545376264c09a6f7c4c5bc390d60eb8819
Dec 13 06:31:53 crc kubenswrapper[5048]: W1213 06:31:53.550787 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod603fdfea_79ed_48f4_89b0_2231fe8fed87.slice/crio-5fa599289a720d22b037358570597756f230f8faa4e6c470e33b25f3f62985f6 WatchSource:0}: Error finding container 5fa599289a720d22b037358570597756f230f8faa4e6c470e33b25f3f62985f6: Status 404 returned error can't find the container with id 5fa599289a720d22b037358570597756f230f8faa4e6c470e33b25f3f62985f6
Dec 13 06:31:53 crc kubenswrapper[5048]: W1213 06:31:53.585162 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1e226353_ea51_4aa1_921b_63a11a769cc7.slice/crio-6d7c1e240e2f5112bd27cd451ec0103bd15ccf58450a6669d9a45b10b9182b23 WatchSource:0}: Error finding container 6d7c1e240e2f5112bd27cd451ec0103bd15ccf58450a6669d9a45b10b9182b23: Status 404 returned error can't find the container with id 6d7c1e240e2f5112bd27cd451ec0103bd15ccf58450a6669d9a45b10b9182b23
Dec 13 06:31:53 crc kubenswrapper[5048]: W1213 06:31:53.594236 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d2b0c42_857f_4f52_8742_02c7ee9a9fd3.slice/crio-87392d71514bfb9b230201ab02e0d0ce35216fed72338b3908643b792a425cf1 WatchSource:0}: Error finding container 87392d71514bfb9b230201ab02e0d0ce35216fed72338b3908643b792a425cf1: Status 404 returned error can't find the container with id 87392d71514bfb9b230201ab02e0d0ce35216fed72338b3908643b792a425cf1
Dec 13 06:31:53 crc kubenswrapper[5048]: W1213 06:31:53.595016 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda458fdb3_0662_44a0_8df6_b81dcb66a669.slice/crio-da410895572ee187110a74cdc92a81e356d3eb3321a1e243b744e5ee0578ea06 WatchSource:0}: Error finding container da410895572ee187110a74cdc92a81e356d3eb3321a1e243b744e5ee0578ea06: Status 404 returned error can't find the container with id da410895572ee187110a74cdc92a81e356d3eb3321a1e243b744e5ee0578ea06
Dec 13 06:31:53 crc kubenswrapper[5048]: W1213 06:31:53.597047 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05dc53d2_6b17_42e1_b211_43716c5f3037.slice/crio-af5efce80b4a4574920203966105faf93ff162d066985deaddd6ee27f3c83f39 WatchSource:0}: Error finding container af5efce80b4a4574920203966105faf93ff162d066985deaddd6ee27f3c83f39: Status 404 returned error can't find the container with id af5efce80b4a4574920203966105faf93ff162d066985deaddd6ee27f3c83f39
Dec 13 06:31:53 crc kubenswrapper[5048]: I1213 06:31:53.794471 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9tcn7"]
Dec 13 06:31:53 crc kubenswrapper[5048]: I1213 06:31:53.970396 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-4vp8z" event={"ID":"05dc53d2-6b17-42e1-b211-43716c5f3037","Type":"ContainerStarted","Data":"af5efce80b4a4574920203966105faf93ff162d066985deaddd6ee27f3c83f39"}
Dec 13 06:31:53 crc kubenswrapper[5048]: I1213 06:31:53.972164 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29426790-559jq" event={"ID":"a458fdb3-0662-44a0-8df6-b81dcb66a669","Type":"ContainerStarted","Data":"da410895572ee187110a74cdc92a81e356d3eb3321a1e243b744e5ee0578ea06"}
Dec 13 06:31:53 crc kubenswrapper[5048]: I1213 06:31:53.974342 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-xkmc7" event={"ID":"9d2b0c42-857f-4f52-8742-02c7ee9a9fd3","Type":"ContainerStarted","Data":"87392d71514bfb9b230201ab02e0d0ce35216fed72338b3908643b792a425cf1"}
Dec 13 06:31:53 crc kubenswrapper[5048]: I1213 06:31:53.975623 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-cxfwv" event={"ID":"27a55ec7-3458-4125-aacf-9c6a91b5145b","Type":"ContainerStarted","Data":"ad76ae5920d29d298595981fcca33c00930fe52970b4067ed9cd6547f1283e17"}
Dec 13 06:31:53 crc kubenswrapper[5048]: I1213 06:31:53.978671 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l5kdh" event={"ID":"cdcbb504-e2d6-4511-bf24-d18ba641f45b","Type":"ContainerStarted","Data":"1a7bdc09641c4f58da3d6137ab174c0c2be9c45a97d5550c3ed48344ee276b01"}
Dec 13 06:31:53 crc kubenswrapper[5048]: I1213 06:31:53.980427 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-gzp2z" event={"ID":"5dc88be1-84b6-4574-9b80-de53c49a7b1e","Type":"ContainerStarted","Data":"0c7396c7fcaedebe8da414a8df99291157bc9f7d8f8c123332adc89a9d7a059e"}
Dec 13 06:31:53 crc kubenswrapper[5048]: I1213 06:31:53.981941 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-6p4wm" event={"ID":"603fdfea-79ed-48f4-89b0-2231fe8fed87","Type":"ContainerStarted","Data":"5fa599289a720d22b037358570597756f230f8faa4e6c470e33b25f3f62985f6"}
Dec 13 06:31:53 crc kubenswrapper[5048]: I1213 06:31:53.983288 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-28jpz" event={"ID":"6a24e010-6ffa-45f6-9b92-99896a2c287f","Type":"ContainerStarted","Data":"a95cef7bcee77b3405af5aff9c40d380217c409445b2c16a4b5fd463cd492e63"}
Dec 13 06:31:53 crc kubenswrapper[5048]: I1213 06:31:53.984766 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zrtpr" event={"ID":"6a6efd27-be04-47b3-8f2c-fa84e687e4f2","Type":"ContainerStarted","Data":"eca0d55a5fb8a7cb3101e105e28a844e6e6a23139cbe282985400f48bf729ea3"}
Dec 13 06:31:53 crc kubenswrapper[5048]: I1213 06:31:53.986183 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9qv2z" event={"ID":"e2896f64-29a2-4d9e-9836-e0abe2a8162f","Type":"ContainerStarted","Data":"ef7bdf137c08047b80e6d56219a4ca4e69be8ec4dcfeffb4ef2d277183385d8b"}
Dec 13 06:31:53 crc kubenswrapper[5048]: I1213 06:31:53.987232 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-lwkxb" event={"ID":"d37b6448-0b50-4bf9-b345-0feec5f5babb","Type":"ContainerStarted","Data":"eb7c172e8d26dd067ec433276ee764545376264c09a6f7c4c5bc390d60eb8819"}
Dec 13 06:31:53 crc kubenswrapper[5048]: I1213 06:31:53.988969 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-k7pxh" event={"ID":"1e226353-ea51-4aa1-921b-63a11a769cc7","Type":"ContainerStarted","Data":"6d7c1e240e2f5112bd27cd451ec0103bd15ccf58450a6669d9a45b10b9182b23"}
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:31:54 crc kubenswrapper[5048]: I1213 06:31:54.993405 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9tcn7" event={"ID":"7c4ae147-0445-404f-9c19-bd55e71ceae8","Type":"ContainerStarted","Data":"ba1259bee5d65e14f621c615de7b64b26186551fc3618925f5eb6bfe7a884aaf"} Dec 13 06:31:54 crc kubenswrapper[5048]: I1213 06:31:54.994681 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-vtxwp" event={"ID":"5a408a68-6f27-4655-a067-3d2b08ad5a7d","Type":"ContainerStarted","Data":"b4ca68768db430ecbeb8e6c8dfea8b866254bfdefeb1fdeb35c92350e1541391"} Dec 13 06:31:55 crc kubenswrapper[5048]: I1213 06:31:55.004370 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:31:55 crc kubenswrapper[5048]: E1213 06:31:55.004498 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:31:55.504477307 +0000 UTC m=+149.371071898 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:31:55 crc kubenswrapper[5048]: I1213 06:31:55.004614 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:55 crc kubenswrapper[5048]: E1213 06:31:55.004944 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:31:55.504935279 +0000 UTC m=+149.371529860 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:31:55 crc kubenswrapper[5048]: I1213 06:31:55.919198 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:31:55 crc kubenswrapper[5048]: I1213 06:31:55.919564 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:31:55 crc kubenswrapper[5048]: E1213 06:31:55.924596 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:31:56.424564651 +0000 UTC m=+150.291159242 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:31:56 crc kubenswrapper[5048]: I1213 06:31:56.002271 5048 generic.go:334] "Generic (PLEG): container finished" podID="5a408a68-6f27-4655-a067-3d2b08ad5a7d" containerID="b4ca68768db430ecbeb8e6c8dfea8b866254bfdefeb1fdeb35c92350e1541391" exitCode=0 Dec 13 06:31:56 crc kubenswrapper[5048]: I1213 06:31:56.002332 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-vtxwp" event={"ID":"5a408a68-6f27-4655-a067-3d2b08ad5a7d","Type":"ContainerDied","Data":"b4ca68768db430ecbeb8e6c8dfea8b866254bfdefeb1fdeb35c92350e1541391"} Dec 13 06:31:56 crc kubenswrapper[5048]: I1213 06:31:56.004114 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-g9bxc" event={"ID":"b9c10fa1-af81-4b42-9c40-55c4a7aa6703","Type":"ContainerStarted","Data":"5a46f5ab9522ff1a21d9b59b32135881dd25a5a81232ffe93dd9ee3772f8bad5"} Dec 13 06:31:56 crc kubenswrapper[5048]: I1213 06:31:56.021091 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:31:56 crc kubenswrapper[5048]: I1213 06:31:56.021181 
5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:31:56 crc kubenswrapper[5048]: I1213 06:31:56.021211 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:31:56 crc kubenswrapper[5048]: I1213 06:31:56.021264 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:56 crc kubenswrapper[5048]: E1213 06:31:56.024910 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:31:56.52488659 +0000 UTC m=+150.391481171 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:31:56 crc kubenswrapper[5048]: I1213 06:31:56.029523 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:31:56 crc kubenswrapper[5048]: I1213 06:31:56.029716 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:31:56 crc kubenswrapper[5048]: I1213 06:31:56.030350 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:31:56 crc kubenswrapper[5048]: I1213 06:31:56.065138 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:31:56 crc kubenswrapper[5048]: I1213 06:31:56.081746 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 13 06:31:56 crc kubenswrapper[5048]: I1213 06:31:56.093147 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:31:56 crc kubenswrapper[5048]: I1213 06:31:56.102766 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 13 06:31:56 crc kubenswrapper[5048]: I1213 06:31:56.124337 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:31:56 crc kubenswrapper[5048]: E1213 06:31:56.124975 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:31:56.624948773 +0000 UTC m=+150.491543354 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:31:56 crc kubenswrapper[5048]: I1213 06:31:56.228476 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:56 crc kubenswrapper[5048]: E1213 06:31:56.229304 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:31:56.729286659 +0000 UTC m=+150.595881240 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:31:57 crc kubenswrapper[5048]: I1213 06:31:57.082156 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:31:57 crc kubenswrapper[5048]: E1213 06:31:57.082570 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:31:57.582553869 +0000 UTC m=+151.449148450 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:31:57 crc kubenswrapper[5048]: I1213 06:31:57.112254 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-m4qpv" event={"ID":"c5a72af5-cc5a-4d16-acc9-2126107c6b6c","Type":"ContainerStarted","Data":"ff7d801f8da0e48a9e2f1fc3f6b08faf1d935e059f584ba117785a6c793f52af"} Dec 13 06:31:57 crc kubenswrapper[5048]: I1213 06:31:57.185285 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:31:57 crc kubenswrapper[5048]: E1213 06:31:57.185883 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:31:57.685864628 +0000 UTC m=+151.552459209 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:31:57 crc kubenswrapper[5048]: I1213 06:31:57.194948 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l5kdh" event={"ID":"cdcbb504-e2d6-4511-bf24-d18ba641f45b","Type":"ContainerStarted","Data":"ff6ff6757e2d74634212549e35cd2fa87c204bd4d2a6feb17a87635415f00de1"} Dec 13 06:31:57 crc kubenswrapper[5048]: I1213 06:31:57.207191 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-w5fph" event={"ID":"d0451ecc-c881-4b85-a6af-63796033ef83","Type":"ContainerStarted","Data":"286f930c1a6f57aff763520467244c832511c1e6a3f40ce3fdb30ad6bb950008"} Dec 13 06:31:57 crc kubenswrapper[5048]: I1213 06:31:57.254564 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" event={"ID":"aa6cce4c-6bc2-469b-9062-e928744616db","Type":"ContainerStarted","Data":"59438fd1a765171cdab7887f9dcf93f2bdcb06b763d2382439cebf62accdff28"} Dec 13 06:31:57 crc kubenswrapper[5048]: I1213 06:31:57.258514 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:31:57 crc kubenswrapper[5048]: I1213 06:31:57.268611 5048 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-dwk4m container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.25:6443/healthz\": dial tcp 10.217.0.25:6443: connect: connection refused" start-of-body= Dec 13 06:31:57 crc kubenswrapper[5048]: I1213 06:31:57.268705 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" podUID="aa6cce4c-6bc2-469b-9062-e928744616db" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.25:6443/healthz\": dial tcp 10.217.0.25:6443: connect: connection refused" Dec 13 06:31:57 crc kubenswrapper[5048]: I1213 06:31:57.278366 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-x4k67" event={"ID":"8b20de69-950c-42d1-b967-d0fc59d035cd","Type":"ContainerStarted","Data":"2898ad599e9ec6f54f642ec7ea0d8d543ed6314fa3174c2bf6a34220ef990473"} Dec 13 06:31:57 crc kubenswrapper[5048]: I1213 06:31:57.287212 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:31:57 crc kubenswrapper[5048]: E1213 06:31:57.288677 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:31:57.788660303 +0000 UTC m=+151.655254884 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 13 06:31:57 crc kubenswrapper[5048]: I1213 06:31:57.304786 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5qgj6" event={"ID":"493b8318-26e3-4f4e-b5d1-e1b2fd57de35","Type":"ContainerStarted","Data":"98e33d9b86ffd7001b13f200267f035c5ea3407f6252cb20bae49d25707a05bf"}
Dec 13 06:31:57 crc kubenswrapper[5048]: I1213 06:31:57.334078 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-6p4wm" event={"ID":"603fdfea-79ed-48f4-89b0-2231fe8fed87","Type":"ContainerStarted","Data":"012ebb38fd77cca13d053fe49106983f3a636c4cf3b16dab1498a67f00321d99"}
Dec 13 06:31:57 crc kubenswrapper[5048]: I1213 06:31:57.342246 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8gchm" event={"ID":"2e941413-0d6f-4fe7-bfc1-218b8bc6d026","Type":"ContainerStarted","Data":"3159801a5e8115b38cab0a7ed8b6aa283ac467d83596456b3e78749e8d765aa2"}
Dec 13 06:31:57 crc kubenswrapper[5048]: I1213 06:31:57.384314 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7ch79" event={"ID":"bfeb748d-a196-4696-a599-70a6386cb89b","Type":"ContainerStarted","Data":"b342007b8c53279656aebdd0451bc10fef0f52ee7bcf90905650c61e62096a36"}
Dec 13 06:31:57 crc kubenswrapper[5048]: I1213 06:31:57.388644 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz"
Dec 13 06:31:57 crc kubenswrapper[5048]: E1213 06:31:57.401234 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:31:57.901203419 +0000 UTC m=+151.767798000 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 13 06:31:57 crc kubenswrapper[5048]: I1213 06:31:57.417998 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-t2n95" event={"ID":"4b48e4b5-ade1-4838-8866-692179d8418a","Type":"ContainerStarted","Data":"59bc77faf0d537db81f1e7d9c6327a97eaf1beaa470ca47d712d2fdfb8c82c87"}
Dec 13 06:31:57 crc kubenswrapper[5048]: I1213 06:31:57.507287 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 13 06:31:57 crc kubenswrapper[5048]: E1213 06:31:57.508590 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:31:58.008572427 +0000 UTC m=+151.875167008 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 13 06:31:57 crc kubenswrapper[5048]: I1213 06:31:57.513799 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-5nt4n" event={"ID":"1de0ebfd-b283-4790-badb-fb78d80e6703","Type":"ContainerStarted","Data":"93370343880a95b30a6b8d074b79fd64176354922c3f0aa562093d29ff66ec23"}
Dec 13 06:31:57 crc kubenswrapper[5048]: I1213 06:31:57.555000 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-6zbpx" event={"ID":"c4d0beae-b187-40c3-bb35-aac05eec25b0","Type":"ContainerStarted","Data":"1ebc5a1c138a05c6b5be492b1bcd70aecb5ccf79439b60fcdf672abf710f2afa"}
Dec 13 06:31:57 crc kubenswrapper[5048]: I1213 06:31:57.629278 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz"
Dec 13 06:31:57 crc kubenswrapper[5048]: E1213 06:31:57.632284 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:31:58.132271751 +0000 UTC m=+151.998866332 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 13 06:31:57 crc kubenswrapper[5048]: I1213 06:31:57.940814 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 13 06:31:57 crc kubenswrapper[5048]: E1213 06:31:57.941588 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:31:58.441571261 +0000 UTC m=+152.308165842 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 13 06:31:58 crc kubenswrapper[5048]: I1213 06:31:58.021013 5048 generic.go:334] "Generic (PLEG): container finished" podID="058c0262-48b0-4176-9f52-20de42c46477" containerID="8babfac359def5aa4b3eeff18383a188db75de278c88e5d9e5a14c8716cd9b5a" exitCode=0
Dec 13 06:31:58 crc kubenswrapper[5048]: I1213 06:31:58.021090 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-hxskn" event={"ID":"058c0262-48b0-4176-9f52-20de42c46477","Type":"ContainerDied","Data":"8babfac359def5aa4b3eeff18383a188db75de278c88e5d9e5a14c8716cd9b5a"}
Dec 13 06:31:58 crc kubenswrapper[5048]: I1213 06:31:58.042874 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz"
Dec 13 06:31:58 crc kubenswrapper[5048]: E1213 06:31:58.045070 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:31:58.545055875 +0000 UTC m=+152.411650456 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 13 06:31:58 crc kubenswrapper[5048]: I1213 06:31:58.050217 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-4vp8z" event={"ID":"05dc53d2-6b17-42e1-b211-43716c5f3037","Type":"ContainerStarted","Data":"742df920d011f3da6c4f603b14fb58cb3f8a02598a9bf200507cd8e7254a4576"}
Dec 13 06:31:58 crc kubenswrapper[5048]: I1213 06:31:58.131755 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r8m8l" event={"ID":"3a66da44-fbdf-4676-8250-0b146b67a5e9","Type":"ContainerStarted","Data":"76e3ff4aec53bedaea14663cd6daac9b7a85921db76637a12193aedb53fde45a"}
Dec 13 06:31:58 crc kubenswrapper[5048]: I1213 06:31:58.133813 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r8m8l"
Dec 13 06:31:58 crc kubenswrapper[5048]: I1213 06:31:58.143962 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 13 06:31:58 crc kubenswrapper[5048]: E1213 06:31:58.145101 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:31:58.645083888 +0000 UTC m=+152.511678469 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
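[Annotation] Every Mount/Unmount failure above reduces to the same condition: the kubelet's volume manager needs a CSI client for kubevirt.io.hostpath-provisioner, but that driver has not yet registered with the kubelet (node plugins typically register via a node-driver-registrar sidecar over the kubelet's plugin-registration socket), so it is absent from the node's list of registered drivers; the csi-hostpathplugin-k7pxh pod only reports ContainerStarted at 06:32:00, further down. A minimal sketch of how that list can be inspected from outside the node with client-go, assuming the node name "crc" from this log; the kubeconfig path is a placeholder, not taken from the log:

package main

import (
	"context"
	"fmt"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a client from a kubeconfig; the path here is a placeholder.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		log.Fatal(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}
	// The CSINode object mirrors the kubelet's view of registered CSI drivers;
	// kubevirt.io.hostpath-provisioner would be missing from this list while
	// the errors above are firing, and appear once registration completes.
	csiNode, err := cs.StorageV1().CSINodes().Get(context.TODO(), "crc", metav1.GetOptions{})
	if err != nil {
		log.Fatal(err)
	}
	for _, d := range csiNode.Spec.Drivers {
		fmt.Printf("registered driver: %s (nodeID=%s)\n", d.Name, d.NodeID)
	}
}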
Dec 13 06:31:58 crc kubenswrapper[5048]: I1213 06:31:58.177582 5048 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-r8m8l container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused" start-of-body=
Dec 13 06:31:58 crc kubenswrapper[5048]: I1213 06:31:58.177654 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r8m8l" podUID="3a66da44-fbdf-4676-8250-0b146b67a5e9" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused"
Dec 13 06:31:58 crc kubenswrapper[5048]: I1213 06:31:58.177816 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-7w8rw" event={"ID":"6365e1b5-5ddc-4f83-8388-6ffc37bf18e2","Type":"ContainerStarted","Data":"18893a8d510b7088ce5bc25c006022316c495c1ec4e6d5b5b9b58989fcd0a724"}
Dec 13 06:31:58 crc kubenswrapper[5048]: I1213 06:31:58.222407 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-895rs" event={"ID":"2423ea84-11dc-47d0-8d1d-1ce24260f02e","Type":"ContainerStarted","Data":"b3bb80ab02a31efd18bc63cc0be1d8f7de665d4086f16c0cae562c5ca6226292"}
Dec 13 06:31:58 crc kubenswrapper[5048]: I1213 06:31:58.230723 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-7pgnt" event={"ID":"ab459e43-1b40-481c-901c-20344fb51434","Type":"ContainerStarted","Data":"85393839c5dfeeb74ef24216531a5352b56a133c4de1084f6257a08d9dd32a03"}
Dec 13 06:31:58 crc kubenswrapper[5048]: I1213 06:31:58.241796 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-cwvrg" event={"ID":"6111e5a8-1616-417d-a3d6-d7b1a39ec709","Type":"ContainerStarted","Data":"b7148a268f666f5da3f9e1c5abb098bd39089e64356c5a1497cb5e28edb25d4d"}
Dec 13 06:31:58 crc kubenswrapper[5048]: I1213 06:31:58.243146 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-cwvrg"
Dec 13 06:31:58 crc kubenswrapper[5048]: I1213 06:31:58.246096 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz"
Dec 13 06:31:58 crc kubenswrapper[5048]: I1213 06:31:58.246660 5048 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-cwvrg container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.43:8080/healthz\": dial tcp 10.217.0.43:8080: connect: connection refused" start-of-body=
Dec 13 06:31:58 crc kubenswrapper[5048]: I1213 06:31:58.246824 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-cwvrg" podUID="6111e5a8-1616-417d-a3d6-d7b1a39ec709" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.43:8080/healthz\": dial tcp 10.217.0.43:8080: connect: connection refused"
Dec 13 06:31:58 crc kubenswrapper[5048]: E1213 06:31:58.248645 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:31:58.748626893 +0000 UTC m=+152.615221474 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 13 06:31:58 crc kubenswrapper[5048]: I1213 06:31:58.260784 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lzrgn" event={"ID":"2754f9a4-6375-4864-9d69-7674e3dfe490","Type":"ContainerStarted","Data":"37e4dd9d4e5b70d1b371ddbd76600fcb6033b8bbeb0e279746791dfc13cc199a"}
Dec 13 06:31:58 crc kubenswrapper[5048]: I1213 06:31:58.263191 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lzrgn"
Dec 13 06:31:58 crc kubenswrapper[5048]: I1213 06:31:58.299123 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rd7ph" event={"ID":"4359b12c-c2c7-484a-978d-388afe815a40","Type":"ContainerStarted","Data":"60a8083ea5d9a0f459dba8c6aaacdcc7f4a1de8f7672e85feea2aca79b2e2a8c"}
Dec 13 06:31:58 crc kubenswrapper[5048]: I1213 06:31:58.355098 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 13 06:31:58 crc kubenswrapper[5048]: E1213 06:31:58.356530 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:31:58.856508704 +0000 UTC m=+152.723103295 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 13 06:31:58 crc kubenswrapper[5048]: I1213 06:31:58.391460 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-xclb7" event={"ID":"f567d62b-7941-466c-84c8-06f6854000ba","Type":"ContainerStarted","Data":"d129e8a65e094be058f84a723e5100bb31803a2ddd188b6b79731de4b5da44be"}
Dec 13 06:31:58 crc kubenswrapper[5048]: I1213 06:31:58.391590 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-xclb7"
Dec 13 06:31:58 crc kubenswrapper[5048]: I1213 06:31:58.425252 5048 patch_prober.go:28] interesting pod/downloads-7954f5f757-xclb7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" start-of-body=
Dec 13 06:31:58 crc kubenswrapper[5048]: I1213 06:31:58.425315 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-xclb7" podUID="f567d62b-7941-466c-84c8-06f6854000ba" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused"
Dec 13 06:31:58 crc kubenswrapper[5048]: I1213 06:31:58.459603 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz"
Dec 13 06:31:58 crc kubenswrapper[5048]: E1213 06:31:58.461278 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:31:58.961262381 +0000 UTC m=+152.827856962 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 13 06:31:58 crc kubenswrapper[5048]: I1213 06:31:58.560759 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 13 06:31:58 crc kubenswrapper[5048]: E1213 06:31:58.562148 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:31:59.062131645 +0000 UTC m=+152.928726226 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 13 06:31:58 crc kubenswrapper[5048]: I1213 06:31:58.668812 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz"
Dec 13 06:31:58 crc kubenswrapper[5048]: E1213 06:31:58.669651 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:31:59.169638196 +0000 UTC m=+153.036232777 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
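[Annotation] The "connection refused" probe failures interleaved above are the kubelet's HTTP probes running against pod IPs whose servers are not yet listening: each probe is an HTTP GET where a dial error or a status outside the 2xx/3xx range marks the probe as failed, which is expected noise while these containers are still starting. A rough, self-contained approximation of that check (not the kubelet's actual prober code), using the marketplace-operator endpoint from the entries above:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// probe mimics the shape of a kubelet HTTP probe: GET the pod IP endpoint;
// a dial error or a status outside 2xx/3xx counts as a failed probe.
func probe(url string) error {
	client := &http.Client{Timeout: time.Second}
	resp, err := client.Get(url)
	if err != nil {
		// While the container's server isn't bound yet, this returns
		// "connect: connection refused", exactly as in the log above.
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return fmt.Errorf("unhealthy status %d", resp.StatusCode)
	}
	return nil
}

func main() {
	// Endpoint taken from the marketplace-operator probe entries above.
	if err := probe("http://10.217.0.43:8080/healthz"); err != nil {
		fmt.Println("probe failed:", err)
		return
	}
	fmt.Println("probe ok")
}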
Dec 13 06:31:58 crc kubenswrapper[5048]: I1213 06:31:58.739009 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lzrgn"
Dec 13 06:31:58 crc kubenswrapper[5048]: I1213 06:31:58.770094 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 13 06:31:58 crc kubenswrapper[5048]: E1213 06:31:58.770468 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:31:59.270453519 +0000 UTC m=+153.137048100 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 13 06:31:58 crc kubenswrapper[5048]: I1213 06:31:58.907402 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rd7ph" podStartSLOduration=128.907384987 podStartE2EDuration="2m8.907384987s" podCreationTimestamp="2025-12-13 06:29:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:31:58.905006383 +0000 UTC m=+152.771600964" watchObservedRunningTime="2025-12-13 06:31:58.907384987 +0000 UTC m=+152.773979568"
Dec 13 06:31:58 crc kubenswrapper[5048]: I1213 06:31:58.922032 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz"
Dec 13 06:31:58 crc kubenswrapper[5048]: E1213 06:31:58.931572 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:31:59.431549912 +0000 UTC m=+153.298144493 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.033081 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 13 06:31:59 crc kubenswrapper[5048]: E1213 06:31:59.033649 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:31:59.533627008 +0000 UTC m=+153.400221589 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.135363 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz"
Dec 13 06:31:59 crc kubenswrapper[5048]: E1213 06:31:59.135813 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:31:59.635794717 +0000 UTC m=+153.502389308 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.209458 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-75h2c" podStartSLOduration=129.209397743 podStartE2EDuration="2m9.209397743s" podCreationTimestamp="2025-12-13 06:29:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:31:59.075744183 +0000 UTC m=+152.942338764" watchObservedRunningTime="2025-12-13 06:31:59.209397743 +0000 UTC m=+153.075992324"
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.220659 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-6zbpx" podStartSLOduration=12.220638023 podStartE2EDuration="12.220638023s" podCreationTimestamp="2025-12-13 06:31:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:31:59.208525789 +0000 UTC m=+153.075120370" watchObservedRunningTime="2025-12-13 06:31:59.220638023 +0000 UTC m=+153.087232614"
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.238194 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 13 06:31:59 crc kubenswrapper[5048]: E1213 06:31:59.238635 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:31:59.738615423 +0000 UTC m=+153.605210004 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.339990 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz"
Dec 13 06:31:59 crc kubenswrapper[5048]: E1213 06:31:59.340556 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:31:59.840544005 +0000 UTC m=+153.707138586 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.381995 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-5nt4n"
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.382396 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-5nt4n"
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.383336 5048 patch_prober.go:28] interesting pod/console-f9d7485db-5nt4n container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.8:8443/health\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body=
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.383404 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-5nt4n" podUID="1de0ebfd-b283-4790-badb-fb78d80e6703" containerName="console" probeResult="failure" output="Get \"https://10.217.0.8:8443/health\": dial tcp 10.217.0.8:8443: connect: connection refused"
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.470272 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 13 06:31:59 crc kubenswrapper[5048]: E1213 06:31:59.470640 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:31:59.970622189 +0000 UTC m=+153.837216770 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.492844 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r8m8l" podStartSLOduration=129.492826982 podStartE2EDuration="2m9.492826982s" podCreationTimestamp="2025-12-13 06:29:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:31:59.492696759 +0000 UTC m=+153.359291340" watchObservedRunningTime="2025-12-13 06:31:59.492826982 +0000 UTC m=+153.359421563"
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.584097 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz"
Dec 13 06:31:59 crc kubenswrapper[5048]: E1213 06:31:59.584463 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:32:00.084430149 +0000 UTC m=+153.951024730 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
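[Annotation] The pod_startup_latency_tracker entries above are plain arithmetic: podStartSLOduration is the observed running time minus the pod's creation timestamp, and since the pull timestamps here are zero values, it equals podStartE2EDuration. A small sketch reproducing the catalog-operator numbers from the entry above:

package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	// Values taken from the catalog-operator tracker entry above.
	created, err := time.Parse(layout, "2025-12-13 06:29:50 +0000 UTC")
	if err != nil {
		panic(err)
	}
	observed, err := time.Parse(layout, "2025-12-13 06:31:59.492826982 +0000 UTC")
	if err != nil {
		panic(err)
	}
	d := observed.Sub(created)
	fmt.Println(d)           // 2m9.492826982s  -> podStartE2EDuration
	fmt.Println(d.Seconds()) // 129.492826982   -> podStartSLOduration
}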
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.587324 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-xkmc7" event={"ID":"9d2b0c42-857f-4f52-8742-02c7ee9a9fd3","Type":"ContainerStarted","Data":"716a1dc7daba38ea0ae3453af96cfc150bf1abc5c1b09c766aaf9f8404aa35d8"}
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.587371 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-xkmc7" event={"ID":"9d2b0c42-857f-4f52-8742-02c7ee9a9fd3","Type":"ContainerStarted","Data":"7e20853618dd01913128a96cc62285064552069b1f8eb98f20c0da51c57428f5"}
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.592813 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-m4qpv" event={"ID":"c5a72af5-cc5a-4d16-acc9-2126107c6b6c","Type":"ContainerStarted","Data":"58081de54e32d73e67857058f77bcf99cb4a47d56b77246494ef63d18d6eb91d"}
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.594044 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-cxfwv" event={"ID":"27a55ec7-3458-4125-aacf-9c6a91b5145b","Type":"ContainerStarted","Data":"7974a475cad58b3637bf8f8ef99a10ca68df304a446b4bca6f43e5ad1f166225"}
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.594707 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-cxfwv"
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.599272 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-rtnpd" event={"ID":"54b2c6a4-d68f-4e1d-a686-626abdb6e127","Type":"ContainerStarted","Data":"dfb7976a398be7ff6ee2e01971e7c1353f7626a3be4eb83627d5be3d790fb584"}
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.600523 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9qv2z" event={"ID":"e2896f64-29a2-4d9e-9836-e0abe2a8162f","Type":"ContainerStarted","Data":"79dbad1726aa21c5895236243d747dc89d6a79d07e36f287cabd2c625cf08829"}
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.601031 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9qv2z"
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.607112 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"763caa4938dbad2c04cc226e00d3699085e38a81c5ccaaf92825d3d86df624c4"}
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.691825 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"ab061743ebe65a9c9858d4ec8c881fd9f09adf1037c594ad909e773777284d44"}
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.698284 5048 patch_prober.go:28] interesting pod/console-operator-58897d9998-cxfwv container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.17:8443/readyz\": dial tcp 10.217.0.17:8443: connect: connection refused" start-of-body=
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.698350 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-cxfwv" podUID="27a55ec7-3458-4125-aacf-9c6a91b5145b" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.17:8443/readyz\": dial tcp 10.217.0.17:8443: connect: connection refused"
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.698425 5048 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-9qv2z container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.33:8443/healthz\": dial tcp 10.217.0.33:8443: connect: connection refused" start-of-body=
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.698457 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9qv2z" podUID="e2896f64-29a2-4d9e-9836-e0abe2a8162f" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.33:8443/healthz\": dial tcp 10.217.0.33:8443: connect: connection refused"
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.714171 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 13 06:31:59 crc kubenswrapper[5048]: E1213 06:31:59.714566 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:32:00.214542455 +0000 UTC m=+154.081137026 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.716600 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz"
Dec 13 06:31:59 crc kubenswrapper[5048]: E1213 06:31:59.719697 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:32:00.219680581 +0000 UTC m=+154.086275162 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.727968 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-x4k67" event={"ID":"8b20de69-950c-42d1-b967-d0fc59d035cd","Type":"ContainerStarted","Data":"e5caadeb0621767fed4cc8aad9f81da72cb2541742cdfdf3b34f6c3e7d5c6d25"}
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.851067 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-lwkxb" event={"ID":"d37b6448-0b50-4bf9-b345-0feec5f5babb","Type":"ContainerStarted","Data":"06f9e7f2978f489f10e3aa55663f1c541fc50aeae36d67dec363224d3bf8fe82"}
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.854854 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 13 06:31:59 crc kubenswrapper[5048]: E1213 06:31:59.855198 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:32:00.355142799 +0000 UTC m=+154.221737400 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.855666 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz"
Dec 13 06:31:59 crc kubenswrapper[5048]: E1213 06:31:59.857227 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:32:00.357213084 +0000 UTC m=+154.223807665 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.862647 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-28jpz" event={"ID":"6a24e010-6ffa-45f6-9b92-99896a2c287f","Type":"ContainerStarted","Data":"cc95694a61fdf06aff013ab7161917990e362cbbe9862144e731e9acb58938f3"}
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.862703 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-28jpz" event={"ID":"6a24e010-6ffa-45f6-9b92-99896a2c287f","Type":"ContainerStarted","Data":"6924eabe92e7d4beac24cb2a841aae7f627c88ff6288f96b22388cab1baf7295"}
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.863363 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-28jpz"
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.917971 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-7pgnt" event={"ID":"ab459e43-1b40-481c-901c-20344fb51434","Type":"ContainerStarted","Data":"6a93c40bf015f4df63d8d4865a628ae766ed3940870be37a12055530539a9425"}
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.956951 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 13 06:31:59 crc kubenswrapper[5048]: E1213 06:31:59.957382 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:32:00.457339839 +0000 UTC m=+154.323934420 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.961973 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zrtpr" event={"ID":"6a6efd27-be04-47b3-8f2c-fa84e687e4f2","Type":"ContainerStarted","Data":"0a47e473cdfd2533314e4bc497dabe39ccf018ff4d41d9c7756bcedec8a9aee5"}
Dec 13 06:31:59 crc kubenswrapper[5048]: I1213 06:31:59.962056 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zrtpr" event={"ID":"6a6efd27-be04-47b3-8f2c-fa84e687e4f2","Type":"ContainerStarted","Data":"314c2e3fd5d3946640a3ab9afc4163da942853f74d61b480f992b2648e3c08d6"}
Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.073808 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-gzp2z" event={"ID":"5dc88be1-84b6-4574-9b80-de53c49a7b1e","Type":"ContainerStarted","Data":"71bd7cec3082cdca2ee122051d0a462f934ccfa92042edcbe0f92fb0ddbb24a8"}
Dec 13 06:32:00 crc kubenswrapper[5048]: E1213 06:32:00.074475 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:32:00.574456767 +0000 UTC m=+154.441051348 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.075336 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.084130 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"6ffb334170dca0e67036158e6a32087a07b4346d896e4c18953a3160c82c4651"} Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.084561 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"6064975b8f7bc8a066886448e1e8107fb6a97b036fc8761ac54d14a6cad3f716"} Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.110798 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-x5sc8" event={"ID":"81dcc1e9-b7ab-4d9e-8a4a-6bdc087877a4","Type":"ContainerStarted","Data":"609f6c38afc41017a92845c3d810f444ecb40eb6bb1aeb6d897942d302ab3557"} Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.144619 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-895rs" event={"ID":"2423ea84-11dc-47d0-8d1d-1ce24260f02e","Type":"ContainerStarted","Data":"86d328947fb5df4b20a256e25911b0af939a97920d72f211d7b6cbf9bedbdff6"} Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.156852 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-rtnpd" Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.168813 5048 patch_prober.go:28] interesting pod/router-default-5444994796-rtnpd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 13 06:32:00 crc kubenswrapper[5048]: [-]has-synced failed: reason withheld Dec 13 06:32:00 crc kubenswrapper[5048]: [+]process-running ok Dec 13 06:32:00 crc kubenswrapper[5048]: healthz check failed Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.168873 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rtnpd" podUID="54b2c6a4-d68f-4e1d-a686-626abdb6e127" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.170112 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz" 
event={"ID":"48280256-4ec4-4530-a2f1-8bee7c6b4871","Type":"ContainerStarted","Data":"73c09be4481c36bcc20d58691bfafb28f43b16e504c9ea90fdc2bc9da7f173b0"} Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.177111 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:32:00 crc kubenswrapper[5048]: E1213 06:32:00.177648 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:32:00.677607912 +0000 UTC m=+154.544202493 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.196015 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29426790-559jq" event={"ID":"a458fdb3-0662-44a0-8df6-b81dcb66a669","Type":"ContainerStarted","Data":"9bdf2d1fabf5ccd8b64476b4f827e578a64b1d323d3bd599d2c6ac22bcdef70f"} Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.197301 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-t2n95" podStartSLOduration=131.197281407 podStartE2EDuration="2m11.197281407s" podCreationTimestamp="2025-12-13 06:29:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:32:00.123318971 +0000 UTC m=+153.989913562" watchObservedRunningTime="2025-12-13 06:32:00.197281407 +0000 UTC m=+154.063875978" Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.228016 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5qgj6" event={"ID":"493b8318-26e3-4f4e-b5d1-e1b2fd57de35","Type":"ContainerStarted","Data":"65527b6273410b13ef2ce8f3d285f425f7421689057d5633b17a2aa780560219"} Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.228072 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5qgj6" event={"ID":"493b8318-26e3-4f4e-b5d1-e1b2fd57de35","Type":"ContainerStarted","Data":"d34d935c6442ac92896bce536d3bca0190126488f6f8c4b8f7708d3138de33ee"} Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.255327 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-6p4wm" Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.273975 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9tcn7" 
event={"ID":"7c4ae147-0445-404f-9c19-bd55e71ceae8","Type":"ContainerStarted","Data":"110407f3dc5a8edb846e27898c7580cb9f4e4928191cd14aeb8f5be9ba3bccb8"} Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.274936 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9tcn7" Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.276493 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8gchm" podStartSLOduration=130.276470512 podStartE2EDuration="2m10.276470512s" podCreationTimestamp="2025-12-13 06:29:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:32:00.221819783 +0000 UTC m=+154.088414384" watchObservedRunningTime="2025-12-13 06:32:00.276470512 +0000 UTC m=+154.143065083" Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.278027 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-7w8rw" podStartSLOduration=131.278020484 podStartE2EDuration="2m11.278020484s" podCreationTimestamp="2025-12-13 06:29:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:32:00.264053321 +0000 UTC m=+154.130647902" watchObservedRunningTime="2025-12-13 06:32:00.278020484 +0000 UTC m=+154.144615065" Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.278209 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:32:00 crc kubenswrapper[5048]: E1213 06:32:00.281155 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:32:00.781141107 +0000 UTC m=+154.647735688 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.298059 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-k7pxh" event={"ID":"1e226353-ea51-4aa1-921b-63a11a769cc7","Type":"ContainerStarted","Data":"3beea1bab93319dc434104d03a3dfbcbe9ee6325cf67f7c8d5efbb6040a87004"} Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.299653 5048 patch_prober.go:28] interesting pod/downloads-7954f5f757-xclb7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" start-of-body= Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.299714 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-xclb7" podUID="f567d62b-7941-466c-84c8-06f6854000ba" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.310114 5048 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-cwvrg container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.43:8080/healthz\": dial tcp 10.217.0.43:8080: connect: connection refused" start-of-body= Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.310214 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-cwvrg" podUID="6111e5a8-1616-417d-a3d6-d7b1a39ec709" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.43:8080/healthz\": dial tcp 10.217.0.43:8080: connect: connection refused" Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.326979 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.359934 5048 patch_prober.go:28] interesting pod/downloads-7954f5f757-xclb7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" start-of-body= Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.359992 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-xclb7" podUID="f567d62b-7941-466c-84c8-06f6854000ba" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.366569 5048 patch_prober.go:28] interesting pod/downloads-7954f5f757-xclb7 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" start-of-body= Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.366966 5048 prober.go:107] "Probe failed" 
probeType="Liveness" pod="openshift-console/downloads-7954f5f757-xclb7" podUID="f567d62b-7941-466c-84c8-06f6854000ba" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.366602 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r8m8l" Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.380036 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:32:00 crc kubenswrapper[5048]: E1213 06:32:00.381930 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:32:00.881909508 +0000 UTC m=+154.748504089 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.406836 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-xclb7" podStartSLOduration=131.406805913 podStartE2EDuration="2m11.406805913s" podCreationTimestamp="2025-12-13 06:29:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:32:00.350650744 +0000 UTC m=+154.217245335" watchObservedRunningTime="2025-12-13 06:32:00.406805913 +0000 UTC m=+154.273400494" Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.473962 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-g9bxc" podStartSLOduration=130.473939036 podStartE2EDuration="2m10.473939036s" podCreationTimestamp="2025-12-13 06:29:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:32:00.406713061 +0000 UTC m=+154.273307652" watchObservedRunningTime="2025-12-13 06:32:00.473939036 +0000 UTC m=+154.340533617" Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.475389 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-5nt4n" podStartSLOduration=131.475380175 podStartE2EDuration="2m11.475380175s" podCreationTimestamp="2025-12-13 06:29:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:32:00.473585637 +0000 UTC m=+154.340180248" watchObservedRunningTime="2025-12-13 06:32:00.475380175 +0000 UTC m=+154.341974756" Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.482665 
5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:32:00 crc kubenswrapper[5048]: E1213 06:32:00.500237 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:32:01.000215079 +0000 UTC m=+154.866809660 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.547442 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-q8ltl"] Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.548658 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-q8ltl" Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.555590 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.557258 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-cwvrg" podStartSLOduration=130.557229321 podStartE2EDuration="2m10.557229321s" podCreationTimestamp="2025-12-13 06:29:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:32:00.555938996 +0000 UTC m=+154.422533617" watchObservedRunningTime="2025-12-13 06:32:00.557229321 +0000 UTC m=+154.423823922" Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.596888 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:32:00 crc kubenswrapper[5048]: E1213 06:32:00.597323 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:32:01.097301811 +0000 UTC m=+154.963896392 (durationBeforeRetry 500ms). 
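The MountDevice and TearDownAt failures above all have the same root cause: the hostpath-provisioner CSI driver has not yet registered with this kubelet. The driver's plugin pod (hostpath-provisioner/csi-hostpathplugin-k7pxh) only just reported ContainerStarted, and until its registrar drops a socket that the kubelet's plugin watcher picks up, every operation referencing driver kubevirt.io.hostpath-provisioner fails with "not found in the list of registered CSI drivers". A minimal sketch of checking for that registration socket, assuming the kubelet's conventional plugins_registry directory; the socket filename here is an illustrative assumption, not taken from this log:

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func main() {
        // Default directory the kubelet watches for plugin registration sockets.
        dir := "/var/lib/kubelet/plugins_registry"
        // Assumed filename for illustration; real registrars choose their own name.
        sock := filepath.Join(dir, "kubevirt.io.hostpath-provisioner-reg.sock")
        if fi, err := os.Stat(sock); err == nil && fi.Mode()&os.ModeSocket != 0 {
            fmt.Println("registration socket present:", sock)
            return
        }
        fmt.Println("driver not registered yet; MountDevice/TearDownAt will keep failing until it appears")
    }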
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.603144 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-q8ltl"] Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.698606 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7b93b87-31d6-4279-8ac9-b834417f66d9-catalog-content\") pod \"community-operators-q8ltl\" (UID: \"f7b93b87-31d6-4279-8ac9-b834417f66d9\") " pod="openshift-marketplace/community-operators-q8ltl" Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.698646 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7b93b87-31d6-4279-8ac9-b834417f66d9-utilities\") pod \"community-operators-q8ltl\" (UID: \"f7b93b87-31d6-4279-8ac9-b834417f66d9\") " pod="openshift-marketplace/community-operators-q8ltl" Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.698704 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.698654 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lzrgn" podStartSLOduration=130.698634077 podStartE2EDuration="2m10.698634077s" podCreationTimestamp="2025-12-13 06:29:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:32:00.610752141 +0000 UTC m=+154.477346732" watchObservedRunningTime="2025-12-13 06:32:00.698634077 +0000 UTC m=+154.565228658" Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.698767 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6bz7t\" (UniqueName: \"kubernetes.io/projected/f7b93b87-31d6-4279-8ac9-b834417f66d9-kube-api-access-6bz7t\") pod \"community-operators-q8ltl\" (UID: \"f7b93b87-31d6-4279-8ac9-b834417f66d9\") " pod="openshift-marketplace/community-operators-q8ltl" Dec 13 06:32:00 crc kubenswrapper[5048]: E1213 06:32:00.698989 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:32:01.198975187 +0000 UTC m=+155.065569768 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.705845 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" podStartSLOduration=131.70582359 podStartE2EDuration="2m11.70582359s" podCreationTimestamp="2025-12-13 06:29:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:32:00.696909762 +0000 UTC m=+154.563504353" watchObservedRunningTime="2025-12-13 06:32:00.70582359 +0000 UTC m=+154.572418171" Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.767718 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-4vp8z" podStartSLOduration=130.767697743 podStartE2EDuration="2m10.767697743s" podCreationTimestamp="2025-12-13 06:29:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:32:00.76425305 +0000 UTC m=+154.630847651" watchObservedRunningTime="2025-12-13 06:32:00.767697743 +0000 UTC m=+154.634292324" Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.800475 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.800702 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6bz7t\" (UniqueName: \"kubernetes.io/projected/f7b93b87-31d6-4279-8ac9-b834417f66d9-kube-api-access-6bz7t\") pod \"community-operators-q8ltl\" (UID: \"f7b93b87-31d6-4279-8ac9-b834417f66d9\") " pod="openshift-marketplace/community-operators-q8ltl" Dec 13 06:32:00 crc kubenswrapper[5048]: E1213 06:32:00.800825 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:32:01.300792606 +0000 UTC m=+155.167387197 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.800968 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7b93b87-31d6-4279-8ac9-b834417f66d9-catalog-content\") pod \"community-operators-q8ltl\" (UID: \"f7b93b87-31d6-4279-8ac9-b834417f66d9\") " pod="openshift-marketplace/community-operators-q8ltl" Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.801017 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7b93b87-31d6-4279-8ac9-b834417f66d9-utilities\") pod \"community-operators-q8ltl\" (UID: \"f7b93b87-31d6-4279-8ac9-b834417f66d9\") " pod="openshift-marketplace/community-operators-q8ltl" Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.801182 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.801522 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7b93b87-31d6-4279-8ac9-b834417f66d9-catalog-content\") pod \"community-operators-q8ltl\" (UID: \"f7b93b87-31d6-4279-8ac9-b834417f66d9\") " pod="openshift-marketplace/community-operators-q8ltl" Dec 13 06:32:00 crc kubenswrapper[5048]: E1213 06:32:00.801717 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:32:01.30170136 +0000 UTC m=+155.168296001 (durationBeforeRetry 500ms). 
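Each failure is followed by a "No retries permitted until ..." deadline from nestedpendingoperations: the kubelet refuses to re-run the same volume operation inside a backoff window, here 500ms per the durationBeforeRetry annotations. A minimal sketch of that gate, assuming the fixed 500ms window shown in these records (the kubelet's policy can grow the window for other error classes):

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // retryGate refuses to run an operation again until a backoff window has
    // elapsed, mirroring the "No retries permitted until ..." messages above.
    type retryGate struct {
        notBefore time.Time
        backoff   time.Duration
    }

    func (g *retryGate) try(op func() error) error {
        now := time.Now()
        if now.Before(g.notBefore) {
            return fmt.Errorf("no retries permitted until %s", g.notBefore.Format(time.RFC3339Nano))
        }
        if err := op(); err != nil {
            g.notBefore = now.Add(g.backoff) // failure arms the next window
            return err
        }
        return nil
    }

    func main() {
        g := &retryGate{backoff: 500 * time.Millisecond}
        fail := func() error { return errors.New("driver not found in the list of registered CSI drivers") }
        fmt.Println(g.try(fail)) // runs and fails
        fmt.Println(g.try(fail)) // refused: still inside the window
        time.Sleep(600 * time.Millisecond)
        fmt.Println(g.try(fail)) // window elapsed, operation runs again
    }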
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.801800 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7b93b87-31d6-4279-8ac9-b834417f66d9-utilities\") pod \"community-operators-q8ltl\" (UID: \"f7b93b87-31d6-4279-8ac9-b834417f66d9\") " pod="openshift-marketplace/community-operators-q8ltl" Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.848514 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgc8n" podStartSLOduration=130.84849957 podStartE2EDuration="2m10.84849957s" podCreationTimestamp="2025-12-13 06:29:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:32:00.847543385 +0000 UTC m=+154.714137996" watchObservedRunningTime="2025-12-13 06:32:00.84849957 +0000 UTC m=+154.715094151" Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.903073 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:32:00 crc kubenswrapper[5048]: E1213 06:32:00.903477 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:32:01.403459019 +0000 UTC m=+155.270053600 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.904616 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6bz7t\" (UniqueName: \"kubernetes.io/projected/f7b93b87-31d6-4279-8ac9-b834417f66d9-kube-api-access-6bz7t\") pod \"community-operators-q8ltl\" (UID: \"f7b93b87-31d6-4279-8ac9-b834417f66d9\") " pod="openshift-marketplace/community-operators-q8ltl" Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.911689 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-q8ltl" Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.935348 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-vfwpt"] Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.936789 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vfwpt" Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.989468 5048 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-cwvrg container/marketplace-operator namespace/openshift-marketplace: Liveness probe status=failure output="Get \"http://10.217.0.43:8080/healthz\": dial tcp 10.217.0.43:8080: connect: connection refused" start-of-body= Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.989783 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/marketplace-operator-79b997595-cwvrg" podUID="6111e5a8-1616-417d-a3d6-d7b1a39ec709" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.43:8080/healthz\": dial tcp 10.217.0.43:8080: connect: connection refused" Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.989555 5048 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-cwvrg container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.43:8080/healthz\": dial tcp 10.217.0.43:8080: connect: connection refused" start-of-body= Dec 13 06:32:00 crc kubenswrapper[5048]: I1213 06:32:00.989946 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-cwvrg" podUID="6111e5a8-1616-417d-a3d6-d7b1a39ec709" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.43:8080/healthz\": dial tcp 10.217.0.43:8080: connect: connection refused" Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.047573 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:32:01 crc kubenswrapper[5048]: E1213 06:32:01.048239 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:32:01.548225045 +0000 UTC m=+155.414819636 (durationBeforeRetry 500ms). 
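The readiness and liveness failures for downloads-7954f5f757-xclb7 and marketplace-operator-79b997595-cwvrg are all "connection refused": nothing is listening on the pod IP and port yet, so the probes fail until the container's server binds, at which point the "SyncLoop (probe) ... status=ready" records follow. A minimal sketch of an HTTP probe with kubelet-like semantics (transport errors and non-2xx/3xx statuses count as failure); the URL is the pod address from the log and is only reachable from inside the cluster network:

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    // probe issues a GET the way a kubelet HTTP probe does; a transport error
    // such as "connection refused" or a status outside 200-399 is a failure.
    func probe(url string) error {
        c := &http.Client{Timeout: time.Second}
        resp, err := c.Get(url)
        if err != nil {
            return fmt.Errorf("probe failed: %w", err) // connection refused lands here
        }
        defer resp.Body.Close()
        if resp.StatusCode < 200 || resp.StatusCode >= 400 {
            return fmt.Errorf("probe failed with statuscode: %d", resp.StatusCode)
        }
        return nil
    }

    func main() {
        fmt.Println(probe("http://10.217.0.43:8080/healthz"))
    }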
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.154034 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:32:01 crc kubenswrapper[5048]: E1213 06:32:01.155663 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:32:01.655641084 +0000 UTC m=+155.522235665 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.155741 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r8r72\" (UniqueName: \"kubernetes.io/projected/07531c82-87d1-409f-9c5a-4910633b5786-kube-api-access-r8r72\") pod \"community-operators-vfwpt\" (UID: \"07531c82-87d1-409f-9c5a-4910633b5786\") " pod="openshift-marketplace/community-operators-vfwpt" Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.155768 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.155814 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07531c82-87d1-409f-9c5a-4910633b5786-catalog-content\") pod \"community-operators-vfwpt\" (UID: \"07531c82-87d1-409f-9c5a-4910633b5786\") " pod="openshift-marketplace/community-operators-vfwpt" Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.155859 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07531c82-87d1-409f-9c5a-4910633b5786-utilities\") pod \"community-operators-vfwpt\" (UID: \"07531c82-87d1-409f-9c5a-4910633b5786\") " pod="openshift-marketplace/community-operators-vfwpt" Dec 13 06:32:01 crc kubenswrapper[5048]: E1213 06:32:01.156132 5048 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:32:01.656124266 +0000 UTC m=+155.522718847 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.156183 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-rtnpd" Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.212754 5048 patch_prober.go:28] interesting pod/router-default-5444994796-rtnpd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 13 06:32:01 crc kubenswrapper[5048]: [-]has-synced failed: reason withheld Dec 13 06:32:01 crc kubenswrapper[5048]: [+]process-running ok Dec 13 06:32:01 crc kubenswrapper[5048]: healthz check failed Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.213732 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rtnpd" podUID="54b2c6a4-d68f-4e1d-a686-626abdb6e127" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.256882 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:32:01 crc kubenswrapper[5048]: E1213 06:32:01.257308 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:32:01.757285058 +0000 UTC m=+155.623879639 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.257495 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07531c82-87d1-409f-9c5a-4910633b5786-catalog-content\") pod \"community-operators-vfwpt\" (UID: \"07531c82-87d1-409f-9c5a-4910633b5786\") " pod="openshift-marketplace/community-operators-vfwpt" Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.257639 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07531c82-87d1-409f-9c5a-4910633b5786-utilities\") pod \"community-operators-vfwpt\" (UID: \"07531c82-87d1-409f-9c5a-4910633b5786\") " pod="openshift-marketplace/community-operators-vfwpt" Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.257794 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r8r72\" (UniqueName: \"kubernetes.io/projected/07531c82-87d1-409f-9c5a-4910633b5786-kube-api-access-r8r72\") pod \"community-operators-vfwpt\" (UID: \"07531c82-87d1-409f-9c5a-4910633b5786\") " pod="openshift-marketplace/community-operators-vfwpt" Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.257904 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07531c82-87d1-409f-9c5a-4910633b5786-catalog-content\") pod \"community-operators-vfwpt\" (UID: \"07531c82-87d1-409f-9c5a-4910633b5786\") " pod="openshift-marketplace/community-operators-vfwpt" Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.257916 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.258258 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07531c82-87d1-409f-9c5a-4910633b5786-utilities\") pod \"community-operators-vfwpt\" (UID: \"07531c82-87d1-409f-9c5a-4910633b5786\") " pod="openshift-marketplace/community-operators-vfwpt" Dec 13 06:32:01 crc kubenswrapper[5048]: E1213 06:32:01.258478 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:32:01.758460549 +0000 UTC m=+155.625055180 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.300886 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vfwpt"] Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.324639 5048 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-9tcn7 container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.19:5443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.325003 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9tcn7" podUID="7c4ae147-0445-404f-9c19-bd55e71ceae8" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.19:5443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.343173 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9qv2z" Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.361109 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:32:01 crc kubenswrapper[5048]: E1213 06:32:01.361670 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:32:01.861641746 +0000 UTC m=+155.728236327 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.373098 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"cde6f383be3f51eaaa81169800f092ed5939aa92323f6b20a84ca0b000edec93"} Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.412262 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-vtxwp" event={"ID":"5a408a68-6f27-4655-a067-3d2b08ad5a7d","Type":"ContainerStarted","Data":"ca1cdf2b1ba9636d703e7fc6342598e1acb7542f4943fe596b1d5bd34655629b"} Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.413051 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-vtxwp" Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.438945 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-hxskn" event={"ID":"058c0262-48b0-4176-9f52-20de42c46477","Type":"ContainerStarted","Data":"85e6a147e8b41eff61d1aeffed36b29ebe223b81f1830c8afdf4497a8b3fbc5d"} Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.447606 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9tcn7" Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.450040 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-6p4wm" event={"ID":"603fdfea-79ed-48f4-89b0-2231fe8fed87","Type":"ContainerStarted","Data":"66a00309da2892b7fadba813294cdb3dc61467bc9e460dabf85d1f66d182cb48"} Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.470173 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:32:01 crc kubenswrapper[5048]: E1213 06:32:01.470989 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:32:01.970971285 +0000 UTC m=+155.837565866 (durationBeforeRetry 500ms). 
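For reading these records: after the syslog prefix ("Dec 13 06:32:01 crc kubenswrapper[5048]:"), each line follows the klog header format: severity (I/W/E/F), MMDD, hh:mm:ss.micro, the emitting process id, source file:line, then the message. A small parsing sketch for pulling those fields apart:

    package main

    import (
        "fmt"
        "regexp"
    )

    // klog header: severity, MMDD, time with microseconds, pid, file:line, message.
    var klogHeader = regexp.MustCompile(`^([IWEF])(\d{4}) (\d{2}:\d{2}:\d{2}\.\d+)\s+(\d+) ([\w./]+:\d+)\] (.*)$`)

    func main() {
        line := `E1213 06:32:01.361670 5048 nestedpendingoperations.go:348] Operation failed.`
        m := klogHeader.FindStringSubmatch(line)
        if m == nil {
            fmt.Println("no match")
            return
        }
        fmt.Printf("severity=%s date=%s time=%s pid=%s source=%s msg=%q\n",
            m[1], m[2], m[3], m[4], m[5], m[6])
    }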
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.480177 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"48e0176257eefbaf1f570d65f1940fec401cfa8e2231935577637e414a35a3a4"} Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.482984 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.491799 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-cwvrg" Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.572160 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.573610 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r8r72\" (UniqueName: \"kubernetes.io/projected/07531c82-87d1-409f-9c5a-4910633b5786-kube-api-access-r8r72\") pod \"community-operators-vfwpt\" (UID: \"07531c82-87d1-409f-9c5a-4910633b5786\") " pod="openshift-marketplace/community-operators-vfwpt" Dec 13 06:32:01 crc kubenswrapper[5048]: E1213 06:32:01.574480 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:32:02.07446433 +0000 UTC m=+155.941058911 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.589236 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-w5fph" podStartSLOduration=131.589200033 podStartE2EDuration="2m11.589200033s" podCreationTimestamp="2025-12-13 06:29:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:32:01.561695519 +0000 UTC m=+155.428290110" watchObservedRunningTime="2025-12-13 06:32:01.589200033 +0000 UTC m=+155.455794624" Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.695682 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:32:01 crc kubenswrapper[5048]: E1213 06:32:01.697078 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:32:02.197040493 +0000 UTC m=+156.063635074 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.701726 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-lqvmp"] Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.704016 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lqvmp" Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.707601 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-hjls5"] Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.715063 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-hjls5" Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.733478 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.749502 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lqvmp"] Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.760198 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hjls5"] Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.826815 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7ch79" podStartSLOduration=132.826793749 podStartE2EDuration="2m12.826793749s" podCreationTimestamp="2025-12-13 06:29:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:32:01.788280291 +0000 UTC m=+155.654874872" watchObservedRunningTime="2025-12-13 06:32:01.826793749 +0000 UTC m=+155.693388330" Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.833159 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:32:01 crc kubenswrapper[5048]: E1213 06:32:01.834418 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:32:02.334398242 +0000 UTC m=+156.200992823 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.861811 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-vfwpt" Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.941955 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.942016 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57717114-1abd-46bf-bdbd-0a785d734cd3-catalog-content\") pod \"certified-operators-lqvmp\" (UID: \"57717114-1abd-46bf-bdbd-0a785d734cd3\") " pod="openshift-marketplace/certified-operators-lqvmp" Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.942045 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cwtbg\" (UniqueName: \"kubernetes.io/projected/1ea76a75-e8de-4a91-89af-726df36e8a21-kube-api-access-cwtbg\") pod \"certified-operators-hjls5\" (UID: \"1ea76a75-e8de-4a91-89af-726df36e8a21\") " pod="openshift-marketplace/certified-operators-hjls5" Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.942098 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1ea76a75-e8de-4a91-89af-726df36e8a21-utilities\") pod \"certified-operators-hjls5\" (UID: \"1ea76a75-e8de-4a91-89af-726df36e8a21\") " pod="openshift-marketplace/certified-operators-hjls5" Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.942139 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57717114-1abd-46bf-bdbd-0a785d734cd3-utilities\") pod \"certified-operators-lqvmp\" (UID: \"57717114-1abd-46bf-bdbd-0a785d734cd3\") " pod="openshift-marketplace/certified-operators-lqvmp" Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.942159 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cksfn\" (UniqueName: \"kubernetes.io/projected/57717114-1abd-46bf-bdbd-0a785d734cd3-kube-api-access-cksfn\") pod \"certified-operators-lqvmp\" (UID: \"57717114-1abd-46bf-bdbd-0a785d734cd3\") " pod="openshift-marketplace/certified-operators-lqvmp" Dec 13 06:32:01 crc kubenswrapper[5048]: I1213 06:32:01.942179 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1ea76a75-e8de-4a91-89af-726df36e8a21-catalog-content\") pod \"certified-operators-hjls5\" (UID: \"1ea76a75-e8de-4a91-89af-726df36e8a21\") " pod="openshift-marketplace/certified-operators-hjls5" Dec 13 06:32:01 crc kubenswrapper[5048]: E1213 06:32:01.942557 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:32:02.442541741 +0000 UTC m=+156.309136322 (durationBeforeRetry 500ms). 
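The pod_startup_latency_tracker records are plain arithmetic: podStartSLOduration is the observation time minus the pod's creation timestamp. Reproducing the etcd-operator number from above, 06:32:01.589200033 minus 06:29:50 is 2m11.589200033s, i.e. the logged 131.589200033 seconds. A minimal sketch of that computation:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // Timestamps copied from the etcd-operator startup-latency record above.
        layout := "2006-01-02 15:04:05.999999999 -0700 MST"
        created, _ := time.Parse(layout, "2025-12-13 06:29:50 +0000 UTC")
        observed, _ := time.Parse(layout, "2025-12-13 06:32:01.589200033 +0000 UTC")
        d := observed.Sub(created)
        fmt.Println(d, d.Seconds()) // 2m11.589200033s 131.589200033
    }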
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.043149 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.043312 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57717114-1abd-46bf-bdbd-0a785d734cd3-utilities\") pod \"certified-operators-lqvmp\" (UID: \"57717114-1abd-46bf-bdbd-0a785d734cd3\") " pod="openshift-marketplace/certified-operators-lqvmp" Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.043339 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cksfn\" (UniqueName: \"kubernetes.io/projected/57717114-1abd-46bf-bdbd-0a785d734cd3-kube-api-access-cksfn\") pod \"certified-operators-lqvmp\" (UID: \"57717114-1abd-46bf-bdbd-0a785d734cd3\") " pod="openshift-marketplace/certified-operators-lqvmp" Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.043358 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1ea76a75-e8de-4a91-89af-726df36e8a21-catalog-content\") pod \"certified-operators-hjls5\" (UID: \"1ea76a75-e8de-4a91-89af-726df36e8a21\") " pod="openshift-marketplace/certified-operators-hjls5" Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.043400 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57717114-1abd-46bf-bdbd-0a785d734cd3-catalog-content\") pod \"certified-operators-lqvmp\" (UID: \"57717114-1abd-46bf-bdbd-0a785d734cd3\") " pod="openshift-marketplace/certified-operators-lqvmp" Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.043417 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cwtbg\" (UniqueName: \"kubernetes.io/projected/1ea76a75-e8de-4a91-89af-726df36e8a21-kube-api-access-cwtbg\") pod \"certified-operators-hjls5\" (UID: \"1ea76a75-e8de-4a91-89af-726df36e8a21\") " pod="openshift-marketplace/certified-operators-hjls5" Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.049629 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1ea76a75-e8de-4a91-89af-726df36e8a21-utilities\") pod \"certified-operators-hjls5\" (UID: \"1ea76a75-e8de-4a91-89af-726df36e8a21\") " pod="openshift-marketplace/certified-operators-hjls5" Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.053029 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57717114-1abd-46bf-bdbd-0a785d734cd3-utilities\") pod \"certified-operators-lqvmp\" (UID: \"57717114-1abd-46bf-bdbd-0a785d734cd3\") 
" pod="openshift-marketplace/certified-operators-lqvmp" Dec 13 06:32:02 crc kubenswrapper[5048]: E1213 06:32:02.053188 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:32:02.553160605 +0000 UTC m=+156.419755186 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.053858 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57717114-1abd-46bf-bdbd-0a785d734cd3-catalog-content\") pod \"certified-operators-lqvmp\" (UID: \"57717114-1abd-46bf-bdbd-0a785d734cd3\") " pod="openshift-marketplace/certified-operators-lqvmp" Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.054600 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1ea76a75-e8de-4a91-89af-726df36e8a21-utilities\") pod \"certified-operators-hjls5\" (UID: \"1ea76a75-e8de-4a91-89af-726df36e8a21\") " pod="openshift-marketplace/certified-operators-hjls5" Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.056552 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1ea76a75-e8de-4a91-89af-726df36e8a21-catalog-content\") pod \"certified-operators-hjls5\" (UID: \"1ea76a75-e8de-4a91-89af-726df36e8a21\") " pod="openshift-marketplace/certified-operators-hjls5" Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.147588 5048 patch_prober.go:28] interesting pod/console-operator-58897d9998-cxfwv container/console-operator namespace/openshift-console-operator: Liveness probe status=failure output="Get \"https://10.217.0.17:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.147700 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console-operator/console-operator-58897d9998-cxfwv" podUID="27a55ec7-3458-4125-aacf-9c6a91b5145b" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.17:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.148316 5048 patch_prober.go:28] interesting pod/console-operator-58897d9998-cxfwv container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.17:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.148394 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-cxfwv" podUID="27a55ec7-3458-4125-aacf-9c6a91b5145b" 
containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.17:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.152026 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:32:02 crc kubenswrapper[5048]: E1213 06:32:02.152621 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:32:02.652602161 +0000 UTC m=+156.519196742 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.156408 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cksfn\" (UniqueName: \"kubernetes.io/projected/57717114-1abd-46bf-bdbd-0a785d734cd3-kube-api-access-cksfn\") pod \"certified-operators-lqvmp\" (UID: \"57717114-1abd-46bf-bdbd-0a785d734cd3\") " pod="openshift-marketplace/certified-operators-lqvmp" Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.161961 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-lqvmp" Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.173012 5048 patch_prober.go:28] interesting pod/router-default-5444994796-rtnpd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 13 06:32:02 crc kubenswrapper[5048]: [-]has-synced failed: reason withheld Dec 13 06:32:02 crc kubenswrapper[5048]: [+]process-running ok Dec 13 06:32:02 crc kubenswrapper[5048]: healthz check failed Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.173444 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rtnpd" podUID="54b2c6a4-d68f-4e1d-a686-626abdb6e127" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.177014 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cwtbg\" (UniqueName: \"kubernetes.io/projected/1ea76a75-e8de-4a91-89af-726df36e8a21-kube-api-access-cwtbg\") pod \"certified-operators-hjls5\" (UID: \"1ea76a75-e8de-4a91-89af-726df36e8a21\") " pod="openshift-marketplace/certified-operators-hjls5" Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.199172 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-m4qpv" podStartSLOduration=132.199156644 podStartE2EDuration="2m12.199156644s" podCreationTimestamp="2025-12-13 06:29:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:32:02.133303325 +0000 UTC m=+155.999897926" watchObservedRunningTime="2025-12-13 06:32:02.199156644 +0000 UTC m=+156.065751225" Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.210511 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-xkmc7" podStartSLOduration=132.210491967 podStartE2EDuration="2m12.210491967s" podCreationTimestamp="2025-12-13 06:29:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:32:02.196917314 +0000 UTC m=+156.063511895" watchObservedRunningTime="2025-12-13 06:32:02.210491967 +0000 UTC m=+156.077086548" Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.216363 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-q8ltl"] Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.258541 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:32:02 crc kubenswrapper[5048]: E1213 06:32:02.259062 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:32:02.759046454 +0000 UTC m=+156.625641035 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.266660 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-28jpz" podStartSLOduration=132.266636056 podStartE2EDuration="2m12.266636056s" podCreationTimestamp="2025-12-13 06:29:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:32:02.26079613 +0000 UTC m=+156.127390711" watchObservedRunningTime="2025-12-13 06:32:02.266636056 +0000 UTC m=+156.133230637" Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.369228 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-6p4wm" podStartSLOduration=15.369211176 podStartE2EDuration="15.369211176s" podCreationTimestamp="2025-12-13 06:31:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:32:02.366838753 +0000 UTC m=+156.233433344" watchObservedRunningTime="2025-12-13 06:32:02.369211176 +0000 UTC m=+156.235805757" Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.371111 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:32:02 crc kubenswrapper[5048]: E1213 06:32:02.371427 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:32:02.871417015 +0000 UTC m=+156.738011596 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.418921 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-cxfwv" Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.490370 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-7pgnt" podStartSLOduration=132.490351412 podStartE2EDuration="2m12.490351412s" podCreationTimestamp="2025-12-13 06:29:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:32:02.448862593 +0000 UTC m=+156.315457184" watchObservedRunningTime="2025-12-13 06:32:02.490351412 +0000 UTC m=+156.356945993" Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.501219 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hjls5" Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.501944 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:32:02 crc kubenswrapper[5048]: E1213 06:32:02.502303 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:32:03.00226499 +0000 UTC m=+156.868859571 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.503511 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:32:02 crc kubenswrapper[5048]: E1213 06:32:02.504091 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:32:03.004078769 +0000 UTC m=+156.870673350 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.519161 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l5kdh" podStartSLOduration=132.519127131 podStartE2EDuration="2m12.519127131s" podCreationTimestamp="2025-12-13 06:29:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:32:02.518509414 +0000 UTC m=+156.385104025" watchObservedRunningTime="2025-12-13 06:32:02.519127131 +0000 UTC m=+156.385721712" Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.585465 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-895rs" podStartSLOduration=132.585447181 podStartE2EDuration="2m12.585447181s" podCreationTimestamp="2025-12-13 06:29:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:32:02.584636359 +0000 UTC m=+156.451230950" watchObservedRunningTime="2025-12-13 06:32:02.585447181 +0000 UTC m=+156.452041762" Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.617840 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:32:02 crc kubenswrapper[5048]: E1213 06:32:02.618172 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:32:03.118154764 +0000 UTC m=+156.984749345 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.618294 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:32:02 crc kubenswrapper[5048]: E1213 06:32:02.619013 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:32:03.118997907 +0000 UTC m=+156.985592488 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.628030 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q8ltl" event={"ID":"f7b93b87-31d6-4279-8ac9-b834417f66d9","Type":"ContainerStarted","Data":"52a8dac9ef288ba7ee8a9bfc9bea2f347c61720ea3db5461133cd227ac9b406a"} Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.628065 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-k7pxh" event={"ID":"1e226353-ea51-4aa1-921b-63a11a769cc7","Type":"ContainerStarted","Data":"90cec79c1080045c42b1681ed5c6947e1c81c46584e86e7593fdd0046e1315bf"} Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.628075 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-hxskn" event={"ID":"058c0262-48b0-4176-9f52-20de42c46477","Type":"ContainerStarted","Data":"87c47d28ee58bcbf8ac7fa0d6586eabdf3dcd5a742aa9f3e3a534cd379cc4aed"} Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.633550 5048 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-vtxwp container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body= Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.633591 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-vtxwp" podUID="5a408a68-6f27-4655-a067-3d2b08ad5a7d" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.720054 5048 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:32:02 crc kubenswrapper[5048]: E1213 06:32:02.722981 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:32:03.222954684 +0000 UTC m=+157.089549325 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.825698 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:32:02 crc kubenswrapper[5048]: E1213 06:32:02.826065 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:32:03.326051067 +0000 UTC m=+157.192645648 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.850365 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-x5sc8" podStartSLOduration=132.850345916 podStartE2EDuration="2m12.850345916s" podCreationTimestamp="2025-12-13 06:29:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:32:02.819663966 +0000 UTC m=+156.686258557" watchObservedRunningTime="2025-12-13 06:32:02.850345916 +0000 UTC m=+156.716940497" Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.884963 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9tcn7" podStartSLOduration=132.88492841 podStartE2EDuration="2m12.88492841s" podCreationTimestamp="2025-12-13 06:29:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:32:02.853218963 +0000 UTC m=+156.719813564" watchObservedRunningTime="2025-12-13 06:32:02.88492841 +0000 UTC m=+156.751523001" Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.904778 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.906573 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.918158 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.919344 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz" podStartSLOduration=132.919326008 podStartE2EDuration="2m12.919326008s" podCreationTimestamp="2025-12-13 06:29:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:32:02.908278294 +0000 UTC m=+156.774872885" watchObservedRunningTime="2025-12-13 06:32:02.919326008 +0000 UTC m=+156.785920589" Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.921259 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.933061 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.933508 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/43b8b15b-e50d-42e2-ae56-87a3f2aa69df-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"43b8b15b-e50d-42e2-ae56-87a3f2aa69df\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.933589 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/43b8b15b-e50d-42e2-ae56-87a3f2aa69df-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"43b8b15b-e50d-42e2-ae56-87a3f2aa69df\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 13 06:32:02 crc kubenswrapper[5048]: E1213 06:32:02.934618 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:32:03.434597176 +0000 UTC m=+157.301191757 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:02 crc kubenswrapper[5048]: I1213 06:32:02.940285 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.027760 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-vtxwp" podStartSLOduration=134.027741044 podStartE2EDuration="2m14.027741044s" podCreationTimestamp="2025-12-13 06:29:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:32:02.947701997 +0000 UTC m=+156.814296598" watchObservedRunningTime="2025-12-13 06:32:03.027741044 +0000 UTC m=+156.894335625" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.030864 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-rb7fl"] Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.032266 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rb7fl" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.034043 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.036929 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/43b8b15b-e50d-42e2-ae56-87a3f2aa69df-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"43b8b15b-e50d-42e2-ae56-87a3f2aa69df\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.036988 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.037021 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/43b8b15b-e50d-42e2-ae56-87a3f2aa69df-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"43b8b15b-e50d-42e2-ae56-87a3f2aa69df\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.037328 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/43b8b15b-e50d-42e2-ae56-87a3f2aa69df-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"43b8b15b-e50d-42e2-ae56-87a3f2aa69df\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 13 06:32:03 crc kubenswrapper[5048]: E1213 06:32:03.037622 5048 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:32:03.537610448 +0000 UTC m=+157.404205039 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.048316 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9qv2z" podStartSLOduration=133.048294593 podStartE2EDuration="2m13.048294593s" podCreationTimestamp="2025-12-13 06:29:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:32:02.982524797 +0000 UTC m=+156.849119378" watchObservedRunningTime="2025-12-13 06:32:03.048294593 +0000 UTC m=+156.914889174" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.050707 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rb7fl"] Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.052511 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-gzp2z" podStartSLOduration=16.052489775 podStartE2EDuration="16.052489775s" podCreationTimestamp="2025-12-13 06:31:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:32:03.038978544 +0000 UTC m=+156.905573145" watchObservedRunningTime="2025-12-13 06:32:03.052489775 +0000 UTC m=+156.919084356" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.080088 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/43b8b15b-e50d-42e2-ae56-87a3f2aa69df-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"43b8b15b-e50d-42e2-ae56-87a3f2aa69df\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.087052 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vfwpt"] Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.125222 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-lwkxb" podStartSLOduration=133.125201257 podStartE2EDuration="2m13.125201257s" podCreationTimestamp="2025-12-13 06:29:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:32:03.118630262 +0000 UTC m=+156.985224863" watchObservedRunningTime="2025-12-13 06:32:03.125201257 +0000 UTC m=+156.991795838" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.143663 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod 
\"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:32:03 crc kubenswrapper[5048]: E1213 06:32:03.144103 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:32:03.644084521 +0000 UTC m=+157.510679102 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.170717 5048 patch_prober.go:28] interesting pod/router-default-5444994796-rtnpd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 13 06:32:03 crc kubenswrapper[5048]: [-]has-synced failed: reason withheld Dec 13 06:32:03 crc kubenswrapper[5048]: [+]process-running ok Dec 13 06:32:03 crc kubenswrapper[5048]: healthz check failed Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.170797 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rtnpd" podUID="54b2c6a4-d68f-4e1d-a686-626abdb6e127" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.178894 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-cxfwv" podStartSLOduration=134.17885855 podStartE2EDuration="2m14.17885855s" podCreationTimestamp="2025-12-13 06:29:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:32:03.175140151 +0000 UTC m=+157.041734732" watchObservedRunningTime="2025-12-13 06:32:03.17885855 +0000 UTC m=+157.045453131" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.241738 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zrtpr" podStartSLOduration=133.241717349 podStartE2EDuration="2m13.241717349s" podCreationTimestamp="2025-12-13 06:29:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:32:03.213919387 +0000 UTC m=+157.080513978" watchObservedRunningTime="2025-12-13 06:32:03.241717349 +0000 UTC m=+157.108311940" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.246061 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kbc9l\" (UniqueName: \"kubernetes.io/projected/d5fb554d-84e3-4bf0-857f-a64da6e6a36f-kube-api-access-kbc9l\") pod \"redhat-marketplace-rb7fl\" (UID: \"d5fb554d-84e3-4bf0-857f-a64da6e6a36f\") " pod="openshift-marketplace/redhat-marketplace-rb7fl" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.246151 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.246183 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d5fb554d-84e3-4bf0-857f-a64da6e6a36f-utilities\") pod \"redhat-marketplace-rb7fl\" (UID: \"d5fb554d-84e3-4bf0-857f-a64da6e6a36f\") " pod="openshift-marketplace/redhat-marketplace-rb7fl" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.246202 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d5fb554d-84e3-4bf0-857f-a64da6e6a36f-catalog-content\") pod \"redhat-marketplace-rb7fl\" (UID: \"d5fb554d-84e3-4bf0-857f-a64da6e6a36f\") " pod="openshift-marketplace/redhat-marketplace-rb7fl" Dec 13 06:32:03 crc kubenswrapper[5048]: E1213 06:32:03.246535 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:32:03.746523047 +0000 UTC m=+157.613117628 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.265416 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29426790-559jq" podStartSLOduration=123.265398992 podStartE2EDuration="2m3.265398992s" podCreationTimestamp="2025-12-13 06:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:32:03.264728614 +0000 UTC m=+157.131323195" watchObservedRunningTime="2025-12-13 06:32:03.265398992 +0000 UTC m=+157.131993573" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.330924 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-dmqg7"] Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.332242 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dmqg7" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.346761 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:32:03 crc kubenswrapper[5048]: E1213 06:32:03.346907 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:32:03.846884848 +0000 UTC m=+157.713479439 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.346930 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kbc9l\" (UniqueName: \"kubernetes.io/projected/d5fb554d-84e3-4bf0-857f-a64da6e6a36f-kube-api-access-kbc9l\") pod \"redhat-marketplace-rb7fl\" (UID: \"d5fb554d-84e3-4bf0-857f-a64da6e6a36f\") " pod="openshift-marketplace/redhat-marketplace-rb7fl" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.346984 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16d5a4f5-03a1-46f1-990b-f69dc88f1e9d-catalog-content\") pod \"redhat-marketplace-dmqg7\" (UID: \"16d5a4f5-03a1-46f1-990b-f69dc88f1e9d\") " pod="openshift-marketplace/redhat-marketplace-dmqg7" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.347026 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.347054 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d5fb554d-84e3-4bf0-857f-a64da6e6a36f-utilities\") pod \"redhat-marketplace-rb7fl\" (UID: \"d5fb554d-84e3-4bf0-857f-a64da6e6a36f\") " pod="openshift-marketplace/redhat-marketplace-rb7fl" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.347072 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d5fb554d-84e3-4bf0-857f-a64da6e6a36f-catalog-content\") pod \"redhat-marketplace-rb7fl\" (UID: \"d5fb554d-84e3-4bf0-857f-a64da6e6a36f\") " pod="openshift-marketplace/redhat-marketplace-rb7fl" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.347119 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" 
(UniqueName: \"kubernetes.io/empty-dir/16d5a4f5-03a1-46f1-990b-f69dc88f1e9d-utilities\") pod \"redhat-marketplace-dmqg7\" (UID: \"16d5a4f5-03a1-46f1-990b-f69dc88f1e9d\") " pod="openshift-marketplace/redhat-marketplace-dmqg7" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.347155 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4c4wp\" (UniqueName: \"kubernetes.io/projected/16d5a4f5-03a1-46f1-990b-f69dc88f1e9d-kube-api-access-4c4wp\") pod \"redhat-marketplace-dmqg7\" (UID: \"16d5a4f5-03a1-46f1-990b-f69dc88f1e9d\") " pod="openshift-marketplace/redhat-marketplace-dmqg7" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.348112 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d5fb554d-84e3-4bf0-857f-a64da6e6a36f-catalog-content\") pod \"redhat-marketplace-rb7fl\" (UID: \"d5fb554d-84e3-4bf0-857f-a64da6e6a36f\") " pod="openshift-marketplace/redhat-marketplace-rb7fl" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.348536 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d5fb554d-84e3-4bf0-857f-a64da6e6a36f-utilities\") pod \"redhat-marketplace-rb7fl\" (UID: \"d5fb554d-84e3-4bf0-857f-a64da6e6a36f\") " pod="openshift-marketplace/redhat-marketplace-rb7fl" Dec 13 06:32:03 crc kubenswrapper[5048]: E1213 06:32:03.349543 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:32:03.849525619 +0000 UTC m=+157.716120260 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.362725 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dmqg7"] Dec 13 06:32:03 crc kubenswrapper[5048]: W1213 06:32:03.372238 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod07531c82_87d1_409f_9c5a_4910633b5786.slice/crio-355278c264fb8041d9179753b9904b27876a78e7ef8d5bca3fa592911de20691 WatchSource:0}: Error finding container 355278c264fb8041d9179753b9904b27876a78e7ef8d5bca3fa592911de20691: Status 404 returned error can't find the container with id 355278c264fb8041d9179753b9904b27876a78e7ef8d5bca3fa592911de20691 Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.372388 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-x4k67" podStartSLOduration=133.372370769 podStartE2EDuration="2m13.372370769s" podCreationTimestamp="2025-12-13 06:29:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:32:03.370712324 +0000 UTC m=+157.237306935" watchObservedRunningTime="2025-12-13 06:32:03.372370769 +0000 UTC m=+157.238965370" Dec 13 06:32:03 crc 
kubenswrapper[5048]: I1213 06:32:03.392035 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kbc9l\" (UniqueName: \"kubernetes.io/projected/d5fb554d-84e3-4bf0-857f-a64da6e6a36f-kube-api-access-kbc9l\") pod \"redhat-marketplace-rb7fl\" (UID: \"d5fb554d-84e3-4bf0-857f-a64da6e6a36f\") " pod="openshift-marketplace/redhat-marketplace-rb7fl" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.438487 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5qgj6" podStartSLOduration=134.438467884 podStartE2EDuration="2m14.438467884s" podCreationTimestamp="2025-12-13 06:29:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:32:03.432058773 +0000 UTC m=+157.298653354" watchObservedRunningTime="2025-12-13 06:32:03.438467884 +0000 UTC m=+157.305062465" Dec 13 06:32:03 crc kubenswrapper[5048]: E1213 06:32:03.452120 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:32:03.952093458 +0000 UTC m=+157.818688039 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.451084 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.455819 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16d5a4f5-03a1-46f1-990b-f69dc88f1e9d-utilities\") pod \"redhat-marketplace-dmqg7\" (UID: \"16d5a4f5-03a1-46f1-990b-f69dc88f1e9d\") " pod="openshift-marketplace/redhat-marketplace-dmqg7" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.455882 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4c4wp\" (UniqueName: \"kubernetes.io/projected/16d5a4f5-03a1-46f1-990b-f69dc88f1e9d-kube-api-access-4c4wp\") pod \"redhat-marketplace-dmqg7\" (UID: \"16d5a4f5-03a1-46f1-990b-f69dc88f1e9d\") " pod="openshift-marketplace/redhat-marketplace-dmqg7" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.455972 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16d5a4f5-03a1-46f1-990b-f69dc88f1e9d-catalog-content\") pod \"redhat-marketplace-dmqg7\" (UID: \"16d5a4f5-03a1-46f1-990b-f69dc88f1e9d\") " pod="openshift-marketplace/redhat-marketplace-dmqg7" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.456006 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:32:03 crc kubenswrapper[5048]: E1213 06:32:03.456309 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:32:03.95629805 +0000 UTC m=+157.822892631 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.456816 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16d5a4f5-03a1-46f1-990b-f69dc88f1e9d-utilities\") pod \"redhat-marketplace-dmqg7\" (UID: \"16d5a4f5-03a1-46f1-990b-f69dc88f1e9d\") " pod="openshift-marketplace/redhat-marketplace-dmqg7" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.457208 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16d5a4f5-03a1-46f1-990b-f69dc88f1e9d-catalog-content\") pod \"redhat-marketplace-dmqg7\" (UID: \"16d5a4f5-03a1-46f1-990b-f69dc88f1e9d\") " pod="openshift-marketplace/redhat-marketplace-dmqg7" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.469381 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-rtnpd" podStartSLOduration=133.469359569 podStartE2EDuration="2m13.469359569s" podCreationTimestamp="2025-12-13 06:29:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:32:03.455020356 +0000 UTC m=+157.321614947" watchObservedRunningTime="2025-12-13 06:32:03.469359569 +0000 UTC m=+157.335954160" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.477652 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.488848 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4c4wp\" (UniqueName: \"kubernetes.io/projected/16d5a4f5-03a1-46f1-990b-f69dc88f1e9d-kube-api-access-4c4wp\") pod \"redhat-marketplace-dmqg7\" (UID: \"16d5a4f5-03a1-46f1-990b-f69dc88f1e9d\") " pod="openshift-marketplace/redhat-marketplace-dmqg7" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.537124 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rb7fl" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.556955 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:32:03 crc kubenswrapper[5048]: E1213 06:32:03.558325 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:32:04.058304925 +0000 UTC m=+157.924899506 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.573554 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hjls5"] Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.584161 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dmqg7" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.611589 5048 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.632941 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-hxskn" podStartSLOduration=134.632919227 podStartE2EDuration="2m14.632919227s" podCreationTimestamp="2025-12-13 06:29:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:32:03.609639365 +0000 UTC m=+157.476233966" watchObservedRunningTime="2025-12-13 06:32:03.632919227 +0000 UTC m=+157.499513808" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.687405 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:32:03 crc kubenswrapper[5048]: E1213 06:32:03.687872 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:32:04.187856474 +0000 UTC m=+158.054451055 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.724000 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lqvmp"] Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.756704 5048 generic.go:334] "Generic (PLEG): container finished" podID="a458fdb3-0662-44a0-8df6-b81dcb66a669" containerID="9bdf2d1fabf5ccd8b64476b4f827e578a64b1d323d3bd599d2c6ac22bcdef70f" exitCode=0 Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.756783 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29426790-559jq" event={"ID":"a458fdb3-0662-44a0-8df6-b81dcb66a669","Type":"ContainerDied","Data":"9bdf2d1fabf5ccd8b64476b4f827e578a64b1d323d3bd599d2c6ac22bcdef70f"} Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.767509 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-k7pxh" event={"ID":"1e226353-ea51-4aa1-921b-63a11a769cc7","Type":"ContainerStarted","Data":"8a5d6a708b7ab2ffca3324385ffeb0cc566b879390a24a07029bee6acb393295"} Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.769485 5048 generic.go:334] "Generic (PLEG): container finished" podID="f7b93b87-31d6-4279-8ac9-b834417f66d9" containerID="34b8fba5e128eadb260a5293bac04f0217ab42c08f89733b6f1fed8b54c22b2a" exitCode=0 Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.769560 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q8ltl" event={"ID":"f7b93b87-31d6-4279-8ac9-b834417f66d9","Type":"ContainerDied","Data":"34b8fba5e128eadb260a5293bac04f0217ab42c08f89733b6f1fed8b54c22b2a"} Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.781706 5048 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.782240 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hjls5" event={"ID":"1ea76a75-e8de-4a91-89af-726df36e8a21","Type":"ContainerStarted","Data":"05c6f9bdf50bb7f4fe15c659050af614a77a95db583e634c7c4e0c09c80b1e11"} Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.789052 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:32:03 crc kubenswrapper[5048]: E1213 06:32:03.789391 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:32:04.289360916 +0000 UTC m=+158.155955497 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.789709 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.790492 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vfwpt" event={"ID":"07531c82-87d1-409f-9c5a-4910633b5786","Type":"ContainerStarted","Data":"355278c264fb8041d9179753b9904b27876a78e7ef8d5bca3fa592911de20691"} Dec 13 06:32:03 crc kubenswrapper[5048]: E1213 06:32:03.791041 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-13 06:32:04.29102939 +0000 UTC m=+158.157623971 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7z7xz" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.894035 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:32:03 crc kubenswrapper[5048]: E1213 06:32:03.894385 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-13 06:32:04.39436903 +0000 UTC m=+158.260963611 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.925965 5048 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-12-13T06:32:03.611621619Z","Handler":null,"Name":""} Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.948099 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-l9l5s"] Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.949098 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-l9l5s" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.951389 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.978668 5048 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Dec 13 06:32:03 crc kubenswrapper[5048]: I1213 06:32:03.978702 5048 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.006982 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6885bcbf-dd86-4e5a-bd21-92395c5ae676-catalog-content\") pod \"redhat-operators-l9l5s\" (UID: \"6885bcbf-dd86-4e5a-bd21-92395c5ae676\") " pod="openshift-marketplace/redhat-operators-l9l5s" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.007063 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6885bcbf-dd86-4e5a-bd21-92395c5ae676-utilities\") pod \"redhat-operators-l9l5s\" (UID: \"6885bcbf-dd86-4e5a-bd21-92395c5ae676\") " pod="openshift-marketplace/redhat-operators-l9l5s" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.007085 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.007127 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vvqcq\" (UniqueName: \"kubernetes.io/projected/6885bcbf-dd86-4e5a-bd21-92395c5ae676-kube-api-access-vvqcq\") pod \"redhat-operators-l9l5s\" (UID: \"6885bcbf-dd86-4e5a-bd21-92395c5ae676\") " pod="openshift-marketplace/redhat-operators-l9l5s" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.025416 5048 csi_attacher.go:380] 
kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.025469 5048 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.025760 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-l9l5s"] Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.109282 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6885bcbf-dd86-4e5a-bd21-92395c5ae676-catalog-content\") pod \"redhat-operators-l9l5s\" (UID: \"6885bcbf-dd86-4e5a-bd21-92395c5ae676\") " pod="openshift-marketplace/redhat-operators-l9l5s" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.109648 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6885bcbf-dd86-4e5a-bd21-92395c5ae676-utilities\") pod \"redhat-operators-l9l5s\" (UID: \"6885bcbf-dd86-4e5a-bd21-92395c5ae676\") " pod="openshift-marketplace/redhat-operators-l9l5s" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.109724 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vvqcq\" (UniqueName: \"kubernetes.io/projected/6885bcbf-dd86-4e5a-bd21-92395c5ae676-kube-api-access-vvqcq\") pod \"redhat-operators-l9l5s\" (UID: \"6885bcbf-dd86-4e5a-bd21-92395c5ae676\") " pod="openshift-marketplace/redhat-operators-l9l5s" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.110911 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6885bcbf-dd86-4e5a-bd21-92395c5ae676-catalog-content\") pod \"redhat-operators-l9l5s\" (UID: \"6885bcbf-dd86-4e5a-bd21-92395c5ae676\") " pod="openshift-marketplace/redhat-operators-l9l5s" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.111187 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6885bcbf-dd86-4e5a-bd21-92395c5ae676-utilities\") pod \"redhat-operators-l9l5s\" (UID: \"6885bcbf-dd86-4e5a-bd21-92395c5ae676\") " pod="openshift-marketplace/redhat-operators-l9l5s" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.144810 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vvqcq\" (UniqueName: \"kubernetes.io/projected/6885bcbf-dd86-4e5a-bd21-92395c5ae676-kube-api-access-vvqcq\") pod \"redhat-operators-l9l5s\" (UID: \"6885bcbf-dd86-4e5a-bd21-92395c5ae676\") " pod="openshift-marketplace/redhat-operators-l9l5s" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.162719 5048 patch_prober.go:28] interesting pod/router-default-5444994796-rtnpd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 13 06:32:04 crc 
kubenswrapper[5048]: [-]has-synced failed: reason withheld Dec 13 06:32:04 crc kubenswrapper[5048]: [+]process-running ok Dec 13 06:32:04 crc kubenswrapper[5048]: healthz check failed Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.162770 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rtnpd" podUID="54b2c6a4-d68f-4e1d-a686-626abdb6e127" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.213803 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.215517 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.217342 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.230229 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.236126 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7z7xz\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.239494 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rb7fl"] Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.242515 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.249616 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.250659 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.259669 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.259863 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.319969 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.320281 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.332577 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9fb25d1d-3347-47fc-aae0-2fb03a1ad397-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"9fb25d1d-3347-47fc-aae0-2fb03a1ad397\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.332765 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9fb25d1d-3347-47fc-aae0-2fb03a1ad397-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"9fb25d1d-3347-47fc-aae0-2fb03a1ad397\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 13 06:32:04 crc kubenswrapper[5048]: W1213 06:32:04.339660 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod43b8b15b_e50d_42e2_ae56_87a3f2aa69df.slice/crio-373565c307b76ea9176e8fef28dfc2483b24a838cd4a24b9d4334a96d089d521 WatchSource:0}: Error finding container 373565c307b76ea9176e8fef28dfc2483b24a838cd4a24b9d4334a96d089d521: Status 404 returned error can't find the container with id 373565c307b76ea9176e8fef28dfc2483b24a838cd4a24b9d4334a96d089d521 Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.362470 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.370397 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-k9jhn"] Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.373308 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-k9jhn"] Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.373490 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-k9jhn" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.387064 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-l9l5s" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.427486 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-vtxwp" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.438484 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9fb25d1d-3347-47fc-aae0-2fb03a1ad397-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"9fb25d1d-3347-47fc-aae0-2fb03a1ad397\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.438559 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87790266-395a-4687-895f-f524c8b2171b-utilities\") pod \"redhat-operators-k9jhn\" (UID: \"87790266-395a-4687-895f-f524c8b2171b\") " pod="openshift-marketplace/redhat-operators-k9jhn" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.438584 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9fb25d1d-3347-47fc-aae0-2fb03a1ad397-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"9fb25d1d-3347-47fc-aae0-2fb03a1ad397\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.438623 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-895rz\" (UniqueName: \"kubernetes.io/projected/87790266-395a-4687-895f-f524c8b2171b-kube-api-access-895rz\") pod \"redhat-operators-k9jhn\" (UID: \"87790266-395a-4687-895f-f524c8b2171b\") " pod="openshift-marketplace/redhat-operators-k9jhn" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.438647 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87790266-395a-4687-895f-f524c8b2171b-catalog-content\") pod \"redhat-operators-k9jhn\" (UID: \"87790266-395a-4687-895f-f524c8b2171b\") " pod="openshift-marketplace/redhat-operators-k9jhn" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.438745 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9fb25d1d-3347-47fc-aae0-2fb03a1ad397-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"9fb25d1d-3347-47fc-aae0-2fb03a1ad397\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.464749 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9fb25d1d-3347-47fc-aae0-2fb03a1ad397-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"9fb25d1d-3347-47fc-aae0-2fb03a1ad397\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.540086 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-895rz\" (UniqueName: \"kubernetes.io/projected/87790266-395a-4687-895f-f524c8b2171b-kube-api-access-895rz\") pod \"redhat-operators-k9jhn\" (UID: \"87790266-395a-4687-895f-f524c8b2171b\") " pod="openshift-marketplace/redhat-operators-k9jhn" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.540143 5048 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87790266-395a-4687-895f-f524c8b2171b-catalog-content\") pod \"redhat-operators-k9jhn\" (UID: \"87790266-395a-4687-895f-f524c8b2171b\") " pod="openshift-marketplace/redhat-operators-k9jhn" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.540231 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87790266-395a-4687-895f-f524c8b2171b-utilities\") pod \"redhat-operators-k9jhn\" (UID: \"87790266-395a-4687-895f-f524c8b2171b\") " pod="openshift-marketplace/redhat-operators-k9jhn" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.541009 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87790266-395a-4687-895f-f524c8b2171b-catalog-content\") pod \"redhat-operators-k9jhn\" (UID: \"87790266-395a-4687-895f-f524c8b2171b\") " pod="openshift-marketplace/redhat-operators-k9jhn" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.541256 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87790266-395a-4687-895f-f524c8b2171b-utilities\") pod \"redhat-operators-k9jhn\" (UID: \"87790266-395a-4687-895f-f524c8b2171b\") " pod="openshift-marketplace/redhat-operators-k9jhn" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.573705 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-895rz\" (UniqueName: \"kubernetes.io/projected/87790266-395a-4687-895f-f524c8b2171b-kube-api-access-895rz\") pod \"redhat-operators-k9jhn\" (UID: \"87790266-395a-4687-895f-f524c8b2171b\") " pod="openshift-marketplace/redhat-operators-k9jhn" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.599035 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.627099 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.774844 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dmqg7"] Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.793148 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-7z7xz"] Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.816177 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rb7fl" event={"ID":"d5fb554d-84e3-4bf0-857f-a64da6e6a36f","Type":"ContainerStarted","Data":"838fb14a68a00c979122dd607a09c8c6b8d24e909176a2aacb01a247003a23fa"} Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.827620 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dmqg7" event={"ID":"16d5a4f5-03a1-46f1-990b-f69dc88f1e9d","Type":"ContainerStarted","Data":"a14133a8445df7195097fd3577b4e03687aabc9ec38c4e4f4e0fbab02ec7a3a9"} Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.832020 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lqvmp" event={"ID":"57717114-1abd-46bf-bdbd-0a785d734cd3","Type":"ContainerStarted","Data":"44c223a9dc9877cdb14998758b183038769841a2039d90d77d127efb13283922"} Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.833375 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"43b8b15b-e50d-42e2-ae56-87a3f2aa69df","Type":"ContainerStarted","Data":"373565c307b76ea9176e8fef28dfc2483b24a838cd4a24b9d4334a96d089d521"} Dec 13 06:32:04 crc kubenswrapper[5048]: W1213 06:32:04.837278 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod36ee331b_baa0_42ac_9bd3_7c52253814e1.slice/crio-a4cfa6a4403dbf348ca3a506785e52511dcd2998f028eeeec1d2470a5d48d3d1 WatchSource:0}: Error finding container a4cfa6a4403dbf348ca3a506785e52511dcd2998f028eeeec1d2470a5d48d3d1: Status 404 returned error can't find the container with id a4cfa6a4403dbf348ca3a506785e52511dcd2998f028eeeec1d2470a5d48d3d1 Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.848754 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ngbkz" Dec 13 06:32:04 crc kubenswrapper[5048]: I1213 06:32:04.909061 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-k9jhn" Dec 13 06:32:05 crc kubenswrapper[5048]: I1213 06:32:05.161335 5048 patch_prober.go:28] interesting pod/router-default-5444994796-rtnpd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 13 06:32:05 crc kubenswrapper[5048]: [-]has-synced failed: reason withheld Dec 13 06:32:05 crc kubenswrapper[5048]: [+]process-running ok Dec 13 06:32:05 crc kubenswrapper[5048]: healthz check failed Dec 13 06:32:05 crc kubenswrapper[5048]: I1213 06:32:05.161686 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rtnpd" podUID="54b2c6a4-d68f-4e1d-a686-626abdb6e127" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 13 06:32:05 crc kubenswrapper[5048]: I1213 06:32:05.220866 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Dec 13 06:32:05 crc kubenswrapper[5048]: I1213 06:32:05.232374 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-l9l5s"] Dec 13 06:32:05 crc kubenswrapper[5048]: W1213 06:32:05.237338 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod9fb25d1d_3347_47fc_aae0_2fb03a1ad397.slice/crio-8931031491d25568e12002c034c52f6d97c58f271b8e1c1a3e4788bfa6abc947 WatchSource:0}: Error finding container 8931031491d25568e12002c034c52f6d97c58f271b8e1c1a3e4788bfa6abc947: Status 404 returned error can't find the container with id 8931031491d25568e12002c034c52f6d97c58f271b8e1c1a3e4788bfa6abc947 Dec 13 06:32:05 crc kubenswrapper[5048]: W1213 06:32:05.255279 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6885bcbf_dd86_4e5a_bd21_92395c5ae676.slice/crio-0d3727a8ba5f468e9ef2575be6348dad212cf60449af253ac7d6960ae044eab9 WatchSource:0}: Error finding container 0d3727a8ba5f468e9ef2575be6348dad212cf60449af253ac7d6960ae044eab9: Status 404 returned error can't find the container with id 0d3727a8ba5f468e9ef2575be6348dad212cf60449af253ac7d6960ae044eab9 Dec 13 06:32:05 crc kubenswrapper[5048]: I1213 06:32:05.351866 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:32:05 crc kubenswrapper[5048]: I1213 06:32:05.352192 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:32:05 crc kubenswrapper[5048]: I1213 06:32:05.440527 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29426790-559jq" Dec 13 06:32:05 crc kubenswrapper[5048]: I1213 06:32:05.471728 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a458fdb3-0662-44a0-8df6-b81dcb66a669-secret-volume\") pod \"a458fdb3-0662-44a0-8df6-b81dcb66a669\" (UID: \"a458fdb3-0662-44a0-8df6-b81dcb66a669\") " Dec 13 06:32:05 crc kubenswrapper[5048]: I1213 06:32:05.471852 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a458fdb3-0662-44a0-8df6-b81dcb66a669-config-volume\") pod \"a458fdb3-0662-44a0-8df6-b81dcb66a669\" (UID: \"a458fdb3-0662-44a0-8df6-b81dcb66a669\") " Dec 13 06:32:05 crc kubenswrapper[5048]: I1213 06:32:05.471891 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d5v67\" (UniqueName: \"kubernetes.io/projected/a458fdb3-0662-44a0-8df6-b81dcb66a669-kube-api-access-d5v67\") pod \"a458fdb3-0662-44a0-8df6-b81dcb66a669\" (UID: \"a458fdb3-0662-44a0-8df6-b81dcb66a669\") " Dec 13 06:32:05 crc kubenswrapper[5048]: I1213 06:32:05.472748 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a458fdb3-0662-44a0-8df6-b81dcb66a669-config-volume" (OuterVolumeSpecName: "config-volume") pod "a458fdb3-0662-44a0-8df6-b81dcb66a669" (UID: "a458fdb3-0662-44a0-8df6-b81dcb66a669"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:32:05 crc kubenswrapper[5048]: I1213 06:32:05.488096 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a458fdb3-0662-44a0-8df6-b81dcb66a669-kube-api-access-d5v67" (OuterVolumeSpecName: "kube-api-access-d5v67") pod "a458fdb3-0662-44a0-8df6-b81dcb66a669" (UID: "a458fdb3-0662-44a0-8df6-b81dcb66a669"). InnerVolumeSpecName "kube-api-access-d5v67". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:32:05 crc kubenswrapper[5048]: I1213 06:32:05.496086 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a458fdb3-0662-44a0-8df6-b81dcb66a669-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "a458fdb3-0662-44a0-8df6-b81dcb66a669" (UID: "a458fdb3-0662-44a0-8df6-b81dcb66a669"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:32:05 crc kubenswrapper[5048]: I1213 06:32:05.573378 5048 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a458fdb3-0662-44a0-8df6-b81dcb66a669-config-volume\") on node \"crc\" DevicePath \"\"" Dec 13 06:32:05 crc kubenswrapper[5048]: I1213 06:32:05.573414 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d5v67\" (UniqueName: \"kubernetes.io/projected/a458fdb3-0662-44a0-8df6-b81dcb66a669-kube-api-access-d5v67\") on node \"crc\" DevicePath \"\"" Dec 13 06:32:05 crc kubenswrapper[5048]: I1213 06:32:05.573426 5048 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a458fdb3-0662-44a0-8df6-b81dcb66a669-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 13 06:32:05 crc kubenswrapper[5048]: I1213 06:32:05.602096 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-k9jhn"] Dec 13 06:32:05 crc kubenswrapper[5048]: W1213 06:32:05.742272 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod87790266_395a_4687_895f_f524c8b2171b.slice/crio-016d7a69702908f4c58036c466ae7bbd4d4c6313aa0f9fc8462e7e46b54d26de WatchSource:0}: Error finding container 016d7a69702908f4c58036c466ae7bbd4d4c6313aa0f9fc8462e7e46b54d26de: Status 404 returned error can't find the container with id 016d7a69702908f4c58036c466ae7bbd4d4c6313aa0f9fc8462e7e46b54d26de Dec 13 06:32:05 crc kubenswrapper[5048]: I1213 06:32:05.849140 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"9fb25d1d-3347-47fc-aae0-2fb03a1ad397","Type":"ContainerStarted","Data":"8931031491d25568e12002c034c52f6d97c58f271b8e1c1a3e4788bfa6abc947"} Dec 13 06:32:05 crc kubenswrapper[5048]: I1213 06:32:05.850647 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l9l5s" event={"ID":"6885bcbf-dd86-4e5a-bd21-92395c5ae676","Type":"ContainerStarted","Data":"0d3727a8ba5f468e9ef2575be6348dad212cf60449af253ac7d6960ae044eab9"} Dec 13 06:32:05 crc kubenswrapper[5048]: I1213 06:32:05.853173 5048 generic.go:334] "Generic (PLEG): container finished" podID="57717114-1abd-46bf-bdbd-0a785d734cd3" containerID="6877c0777b3ec2b3c40ceb70f2d4d77c7fd493eb2ef816b54d5d7006a0b3ea2b" exitCode=0 Dec 13 06:32:05 crc kubenswrapper[5048]: I1213 06:32:05.853248 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lqvmp" event={"ID":"57717114-1abd-46bf-bdbd-0a785d734cd3","Type":"ContainerDied","Data":"6877c0777b3ec2b3c40ceb70f2d4d77c7fd493eb2ef816b54d5d7006a0b3ea2b"} Dec 13 06:32:05 crc kubenswrapper[5048]: I1213 06:32:05.855234 5048 generic.go:334] "Generic (PLEG): container finished" podID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" containerID="4cc2b400c561cf80f3bb04bd290206deb73d14dcd8dd41468ca7e5159f5101e5" exitCode=0 Dec 13 06:32:05 crc kubenswrapper[5048]: I1213 06:32:05.855294 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rb7fl" event={"ID":"d5fb554d-84e3-4bf0-857f-a64da6e6a36f","Type":"ContainerDied","Data":"4cc2b400c561cf80f3bb04bd290206deb73d14dcd8dd41468ca7e5159f5101e5"} Dec 13 06:32:05 crc kubenswrapper[5048]: I1213 06:32:05.868223 5048 generic.go:334] "Generic (PLEG): container finished" podID="07531c82-87d1-409f-9c5a-4910633b5786" 
containerID="8cf9ecd8091677719bf3d3dd416aeba117709026e6176183dc61197e6b968a38" exitCode=0 Dec 13 06:32:05 crc kubenswrapper[5048]: I1213 06:32:05.868314 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vfwpt" event={"ID":"07531c82-87d1-409f-9c5a-4910633b5786","Type":"ContainerDied","Data":"8cf9ecd8091677719bf3d3dd416aeba117709026e6176183dc61197e6b968a38"} Dec 13 06:32:05 crc kubenswrapper[5048]: I1213 06:32:05.871304 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29426790-559jq" event={"ID":"a458fdb3-0662-44a0-8df6-b81dcb66a669","Type":"ContainerDied","Data":"da410895572ee187110a74cdc92a81e356d3eb3321a1e243b744e5ee0578ea06"} Dec 13 06:32:05 crc kubenswrapper[5048]: I1213 06:32:05.871336 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="da410895572ee187110a74cdc92a81e356d3eb3321a1e243b744e5ee0578ea06" Dec 13 06:32:05 crc kubenswrapper[5048]: I1213 06:32:05.871477 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29426790-559jq" Dec 13 06:32:05 crc kubenswrapper[5048]: I1213 06:32:05.876418 5048 generic.go:334] "Generic (PLEG): container finished" podID="16d5a4f5-03a1-46f1-990b-f69dc88f1e9d" containerID="2f40cb1d2d9dfa68a91e9a84c8f3d224e3f1d8b8029a5af32b5d7cf36276fe07" exitCode=0 Dec 13 06:32:05 crc kubenswrapper[5048]: I1213 06:32:05.876520 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dmqg7" event={"ID":"16d5a4f5-03a1-46f1-990b-f69dc88f1e9d","Type":"ContainerDied","Data":"2f40cb1d2d9dfa68a91e9a84c8f3d224e3f1d8b8029a5af32b5d7cf36276fe07"} Dec 13 06:32:05 crc kubenswrapper[5048]: I1213 06:32:05.888490 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k9jhn" event={"ID":"87790266-395a-4687-895f-f524c8b2171b","Type":"ContainerStarted","Data":"016d7a69702908f4c58036c466ae7bbd4d4c6313aa0f9fc8462e7e46b54d26de"} Dec 13 06:32:05 crc kubenswrapper[5048]: I1213 06:32:05.893002 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-k7pxh" event={"ID":"1e226353-ea51-4aa1-921b-63a11a769cc7","Type":"ContainerStarted","Data":"133de2af8a0749fe8b3b6010a90b1da79e1aa59fd788ed7afa75ca817736c9a4"} Dec 13 06:32:05 crc kubenswrapper[5048]: I1213 06:32:05.894286 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" event={"ID":"36ee331b-baa0-42ac-9bd3-7c52253814e1","Type":"ContainerStarted","Data":"01798a848041919b152c6308fa93ba8571978c608dfe2fa259c63306f76f0669"} Dec 13 06:32:05 crc kubenswrapper[5048]: I1213 06:32:05.894310 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" event={"ID":"36ee331b-baa0-42ac-9bd3-7c52253814e1","Type":"ContainerStarted","Data":"a4cfa6a4403dbf348ca3a506785e52511dcd2998f028eeeec1d2470a5d48d3d1"} Dec 13 06:32:05 crc kubenswrapper[5048]: I1213 06:32:05.896623 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"43b8b15b-e50d-42e2-ae56-87a3f2aa69df","Type":"ContainerStarted","Data":"96e29a9f97a1056db8b86042a4bff12c07c40a502742d0abe1b8fbfe70b28d5f"} Dec 13 06:32:05 crc kubenswrapper[5048]: I1213 06:32:05.898877 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-hjls5" event={"ID":"1ea76a75-e8de-4a91-89af-726df36e8a21","Type":"ContainerStarted","Data":"9d1869b8037dded4931b25b72642998e3775101982ab667e4a65d65b274e2d41"} Dec 13 06:32:06 crc kubenswrapper[5048]: I1213 06:32:06.156142 5048 patch_prober.go:28] interesting pod/router-default-5444994796-rtnpd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 13 06:32:06 crc kubenswrapper[5048]: [-]has-synced failed: reason withheld Dec 13 06:32:06 crc kubenswrapper[5048]: [+]process-running ok Dec 13 06:32:06 crc kubenswrapper[5048]: healthz check failed Dec 13 06:32:06 crc kubenswrapper[5048]: I1213 06:32:06.156479 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rtnpd" podUID="54b2c6a4-d68f-4e1d-a686-626abdb6e127" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 13 06:32:06 crc kubenswrapper[5048]: I1213 06:32:06.366273 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-6p4wm" Dec 13 06:32:06 crc kubenswrapper[5048]: I1213 06:32:06.697562 5048 patch_prober.go:28] interesting pod/apiserver-76f77b778f-hxskn container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Dec 13 06:32:06 crc kubenswrapper[5048]: [+]log ok Dec 13 06:32:06 crc kubenswrapper[5048]: [+]etcd ok Dec 13 06:32:06 crc kubenswrapper[5048]: [+]poststarthook/start-apiserver-admission-initializer ok Dec 13 06:32:06 crc kubenswrapper[5048]: [+]poststarthook/generic-apiserver-start-informers ok Dec 13 06:32:06 crc kubenswrapper[5048]: [+]poststarthook/max-in-flight-filter ok Dec 13 06:32:06 crc kubenswrapper[5048]: [+]poststarthook/storage-object-count-tracker-hook ok Dec 13 06:32:06 crc kubenswrapper[5048]: [+]poststarthook/image.openshift.io-apiserver-caches ok Dec 13 06:32:06 crc kubenswrapper[5048]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Dec 13 06:32:06 crc kubenswrapper[5048]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld Dec 13 06:32:06 crc kubenswrapper[5048]: [+]poststarthook/project.openshift.io-projectcache ok Dec 13 06:32:06 crc kubenswrapper[5048]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Dec 13 06:32:06 crc kubenswrapper[5048]: [-]poststarthook/openshift.io-startinformers failed: reason withheld Dec 13 06:32:06 crc kubenswrapper[5048]: [+]poststarthook/openshift.io-restmapperupdater ok Dec 13 06:32:06 crc kubenswrapper[5048]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Dec 13 06:32:06 crc kubenswrapper[5048]: livez check failed Dec 13 06:32:06 crc kubenswrapper[5048]: I1213 06:32:06.697669 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-hxskn" podUID="058c0262-48b0-4176-9f52-20de42c46477" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 13 06:32:06 crc kubenswrapper[5048]: I1213 06:32:06.916384 5048 generic.go:334] "Generic (PLEG): container finished" podID="87790266-395a-4687-895f-f524c8b2171b" containerID="13dbd6ab48b35ce49c0a111ba45edf5d061a07c76da1475d3f49afa279f38915" exitCode=0 Dec 13 06:32:06 crc kubenswrapper[5048]: I1213 06:32:06.916501 5048 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k9jhn" event={"ID":"87790266-395a-4687-895f-f524c8b2171b","Type":"ContainerDied","Data":"13dbd6ab48b35ce49c0a111ba45edf5d061a07c76da1475d3f49afa279f38915"} Dec 13 06:32:06 crc kubenswrapper[5048]: I1213 06:32:06.922349 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"9fb25d1d-3347-47fc-aae0-2fb03a1ad397","Type":"ContainerStarted","Data":"167ac7fb2504a655d58973510f8013c556882f4af08bd9fcb95ee6834a83a258"} Dec 13 06:32:06 crc kubenswrapper[5048]: I1213 06:32:06.939712 5048 generic.go:334] "Generic (PLEG): container finished" podID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" containerID="ddd368b360bba86c9c78307cf7fe5707ca0649ff3e24f3b87ba8bc8f2b36fc8f" exitCode=0 Dec 13 06:32:06 crc kubenswrapper[5048]: I1213 06:32:06.939820 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l9l5s" event={"ID":"6885bcbf-dd86-4e5a-bd21-92395c5ae676","Type":"ContainerDied","Data":"ddd368b360bba86c9c78307cf7fe5707ca0649ff3e24f3b87ba8bc8f2b36fc8f"} Dec 13 06:32:06 crc kubenswrapper[5048]: I1213 06:32:06.948585 5048 generic.go:334] "Generic (PLEG): container finished" podID="43b8b15b-e50d-42e2-ae56-87a3f2aa69df" containerID="96e29a9f97a1056db8b86042a4bff12c07c40a502742d0abe1b8fbfe70b28d5f" exitCode=0 Dec 13 06:32:06 crc kubenswrapper[5048]: I1213 06:32:06.948686 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"43b8b15b-e50d-42e2-ae56-87a3f2aa69df","Type":"ContainerDied","Data":"96e29a9f97a1056db8b86042a4bff12c07c40a502742d0abe1b8fbfe70b28d5f"} Dec 13 06:32:06 crc kubenswrapper[5048]: I1213 06:32:06.956483 5048 generic.go:334] "Generic (PLEG): container finished" podID="1ea76a75-e8de-4a91-89af-726df36e8a21" containerID="9d1869b8037dded4931b25b72642998e3775101982ab667e4a65d65b274e2d41" exitCode=0 Dec 13 06:32:06 crc kubenswrapper[5048]: I1213 06:32:06.973715 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hjls5" event={"ID":"1ea76a75-e8de-4a91-89af-726df36e8a21","Type":"ContainerDied","Data":"9d1869b8037dded4931b25b72642998e3775101982ab667e4a65d65b274e2d41"} Dec 13 06:32:06 crc kubenswrapper[5048]: I1213 06:32:06.974902 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:32:07 crc kubenswrapper[5048]: I1213 06:32:07.031141 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=3.031115168 podStartE2EDuration="3.031115168s" podCreationTimestamp="2025-12-13 06:32:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:32:07.003533141 +0000 UTC m=+160.870127722" watchObservedRunningTime="2025-12-13 06:32:07.031115168 +0000 UTC m=+160.897709759" Dec 13 06:32:07 crc kubenswrapper[5048]: I1213 06:32:07.153753 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-k7pxh" podStartSLOduration=20.153736993 podStartE2EDuration="20.153736993s" podCreationTimestamp="2025-12-13 06:31:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:32:07.150972779 +0000 UTC 
m=+161.017567400" watchObservedRunningTime="2025-12-13 06:32:07.153736993 +0000 UTC m=+161.020331574" Dec 13 06:32:07 crc kubenswrapper[5048]: I1213 06:32:07.158399 5048 patch_prober.go:28] interesting pod/router-default-5444994796-rtnpd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 13 06:32:07 crc kubenswrapper[5048]: [-]has-synced failed: reason withheld Dec 13 06:32:07 crc kubenswrapper[5048]: [+]process-running ok Dec 13 06:32:07 crc kubenswrapper[5048]: healthz check failed Dec 13 06:32:07 crc kubenswrapper[5048]: I1213 06:32:07.158468 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rtnpd" podUID="54b2c6a4-d68f-4e1d-a686-626abdb6e127" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 13 06:32:07 crc kubenswrapper[5048]: I1213 06:32:07.188093 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" podStartSLOduration=137.188049879 podStartE2EDuration="2m17.188049879s" podCreationTimestamp="2025-12-13 06:29:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:32:07.176205683 +0000 UTC m=+161.042800294" watchObservedRunningTime="2025-12-13 06:32:07.188049879 +0000 UTC m=+161.054644460" Dec 13 06:32:08 crc kubenswrapper[5048]: I1213 06:32:08.175192 5048 patch_prober.go:28] interesting pod/router-default-5444994796-rtnpd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 13 06:32:08 crc kubenswrapper[5048]: [-]has-synced failed: reason withheld Dec 13 06:32:08 crc kubenswrapper[5048]: [+]process-running ok Dec 13 06:32:08 crc kubenswrapper[5048]: healthz check failed Dec 13 06:32:08 crc kubenswrapper[5048]: I1213 06:32:08.175258 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rtnpd" podUID="54b2c6a4-d68f-4e1d-a686-626abdb6e127" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 13 06:32:08 crc kubenswrapper[5048]: I1213 06:32:08.283207 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 13 06:32:08 crc kubenswrapper[5048]: I1213 06:32:08.353996 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/43b8b15b-e50d-42e2-ae56-87a3f2aa69df-kube-api-access\") pod \"43b8b15b-e50d-42e2-ae56-87a3f2aa69df\" (UID: \"43b8b15b-e50d-42e2-ae56-87a3f2aa69df\") " Dec 13 06:32:08 crc kubenswrapper[5048]: I1213 06:32:08.354187 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/43b8b15b-e50d-42e2-ae56-87a3f2aa69df-kubelet-dir\") pod \"43b8b15b-e50d-42e2-ae56-87a3f2aa69df\" (UID: \"43b8b15b-e50d-42e2-ae56-87a3f2aa69df\") " Dec 13 06:32:08 crc kubenswrapper[5048]: I1213 06:32:08.354301 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/43b8b15b-e50d-42e2-ae56-87a3f2aa69df-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "43b8b15b-e50d-42e2-ae56-87a3f2aa69df" (UID: "43b8b15b-e50d-42e2-ae56-87a3f2aa69df"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 13 06:32:08 crc kubenswrapper[5048]: I1213 06:32:08.354793 5048 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/43b8b15b-e50d-42e2-ae56-87a3f2aa69df-kubelet-dir\") on node \"crc\" DevicePath \"\"" Dec 13 06:32:08 crc kubenswrapper[5048]: I1213 06:32:08.361113 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43b8b15b-e50d-42e2-ae56-87a3f2aa69df-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "43b8b15b-e50d-42e2-ae56-87a3f2aa69df" (UID: "43b8b15b-e50d-42e2-ae56-87a3f2aa69df"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:32:08 crc kubenswrapper[5048]: I1213 06:32:08.456463 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/43b8b15b-e50d-42e2-ae56-87a3f2aa69df-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 13 06:32:08 crc kubenswrapper[5048]: I1213 06:32:08.991056 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"43b8b15b-e50d-42e2-ae56-87a3f2aa69df","Type":"ContainerDied","Data":"373565c307b76ea9176e8fef28dfc2483b24a838cd4a24b9d4334a96d089d521"} Dec 13 06:32:08 crc kubenswrapper[5048]: I1213 06:32:08.991120 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="373565c307b76ea9176e8fef28dfc2483b24a838cd4a24b9d4334a96d089d521" Dec 13 06:32:08 crc kubenswrapper[5048]: I1213 06:32:08.991247 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 13 06:32:09 crc kubenswrapper[5048]: I1213 06:32:09.157177 5048 patch_prober.go:28] interesting pod/router-default-5444994796-rtnpd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 13 06:32:09 crc kubenswrapper[5048]: [-]has-synced failed: reason withheld Dec 13 06:32:09 crc kubenswrapper[5048]: [+]process-running ok Dec 13 06:32:09 crc kubenswrapper[5048]: healthz check failed Dec 13 06:32:09 crc kubenswrapper[5048]: I1213 06:32:09.157245 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rtnpd" podUID="54b2c6a4-d68f-4e1d-a686-626abdb6e127" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 13 06:32:09 crc kubenswrapper[5048]: I1213 06:32:09.374219 5048 patch_prober.go:28] interesting pod/console-f9d7485db-5nt4n container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.8:8443/health\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body= Dec 13 06:32:09 crc kubenswrapper[5048]: I1213 06:32:09.374308 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-5nt4n" podUID="1de0ebfd-b283-4790-badb-fb78d80e6703" containerName="console" probeResult="failure" output="Get \"https://10.217.0.8:8443/health\": dial tcp 10.217.0.8:8443: connect: connection refused" Dec 13 06:32:10 crc kubenswrapper[5048]: I1213 06:32:10.155883 5048 patch_prober.go:28] interesting pod/router-default-5444994796-rtnpd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 13 06:32:10 crc kubenswrapper[5048]: [-]has-synced failed: reason withheld Dec 13 06:32:10 crc kubenswrapper[5048]: [+]process-running ok Dec 13 06:32:10 crc kubenswrapper[5048]: healthz check failed Dec 13 06:32:10 crc kubenswrapper[5048]: I1213 06:32:10.155948 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rtnpd" podUID="54b2c6a4-d68f-4e1d-a686-626abdb6e127" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 13 06:32:10 crc kubenswrapper[5048]: I1213 06:32:10.352501 5048 patch_prober.go:28] interesting pod/downloads-7954f5f757-xclb7 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" start-of-body= Dec 13 06:32:10 crc kubenswrapper[5048]: I1213 06:32:10.352908 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-xclb7" podUID="f567d62b-7941-466c-84c8-06f6854000ba" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" Dec 13 06:32:10 crc kubenswrapper[5048]: I1213 06:32:10.352553 5048 patch_prober.go:28] interesting pod/downloads-7954f5f757-xclb7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" start-of-body= Dec 13 06:32:10 crc kubenswrapper[5048]: I1213 06:32:10.353011 5048 prober.go:107] "Probe failed" 
probeType="Readiness" pod="openshift-console/downloads-7954f5f757-xclb7" podUID="f567d62b-7941-466c-84c8-06f6854000ba" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" Dec 13 06:32:10 crc kubenswrapper[5048]: I1213 06:32:10.357327 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:32:10 crc kubenswrapper[5048]: I1213 06:32:10.368117 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-hxskn" Dec 13 06:32:11 crc kubenswrapper[5048]: I1213 06:32:11.155731 5048 patch_prober.go:28] interesting pod/router-default-5444994796-rtnpd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 13 06:32:11 crc kubenswrapper[5048]: [-]has-synced failed: reason withheld Dec 13 06:32:11 crc kubenswrapper[5048]: [+]process-running ok Dec 13 06:32:11 crc kubenswrapper[5048]: healthz check failed Dec 13 06:32:11 crc kubenswrapper[5048]: I1213 06:32:11.155800 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rtnpd" podUID="54b2c6a4-d68f-4e1d-a686-626abdb6e127" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 13 06:32:12 crc kubenswrapper[5048]: I1213 06:32:12.154951 5048 patch_prober.go:28] interesting pod/router-default-5444994796-rtnpd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 13 06:32:12 crc kubenswrapper[5048]: [-]has-synced failed: reason withheld Dec 13 06:32:12 crc kubenswrapper[5048]: [+]process-running ok Dec 13 06:32:12 crc kubenswrapper[5048]: healthz check failed Dec 13 06:32:12 crc kubenswrapper[5048]: I1213 06:32:12.155215 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rtnpd" podUID="54b2c6a4-d68f-4e1d-a686-626abdb6e127" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 13 06:32:13 crc kubenswrapper[5048]: I1213 06:32:13.143840 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/226b24e2-92c6-43d1-a621-09702ffa8fd4-metrics-certs\") pod \"network-metrics-daemon-tm62z\" (UID: \"226b24e2-92c6-43d1-a621-09702ffa8fd4\") " pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:32:13 crc kubenswrapper[5048]: I1213 06:32:13.152453 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/226b24e2-92c6-43d1-a621-09702ffa8fd4-metrics-certs\") pod \"network-metrics-daemon-tm62z\" (UID: \"226b24e2-92c6-43d1-a621-09702ffa8fd4\") " pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:32:13 crc kubenswrapper[5048]: I1213 06:32:13.155025 5048 patch_prober.go:28] interesting pod/router-default-5444994796-rtnpd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 13 06:32:13 crc kubenswrapper[5048]: [-]has-synced failed: reason withheld Dec 13 06:32:13 crc kubenswrapper[5048]: [+]process-running ok Dec 13 
06:32:13 crc kubenswrapper[5048]: healthz check failed Dec 13 06:32:13 crc kubenswrapper[5048]: I1213 06:32:13.155123 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rtnpd" podUID="54b2c6a4-d68f-4e1d-a686-626abdb6e127" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 13 06:32:13 crc kubenswrapper[5048]: I1213 06:32:13.271702 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tm62z" Dec 13 06:32:13 crc kubenswrapper[5048]: I1213 06:32:13.561421 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-tm62z"] Dec 13 06:32:14 crc kubenswrapper[5048]: I1213 06:32:14.157349 5048 patch_prober.go:28] interesting pod/router-default-5444994796-rtnpd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 13 06:32:14 crc kubenswrapper[5048]: [-]has-synced failed: reason withheld Dec 13 06:32:14 crc kubenswrapper[5048]: [+]process-running ok Dec 13 06:32:14 crc kubenswrapper[5048]: healthz check failed Dec 13 06:32:14 crc kubenswrapper[5048]: I1213 06:32:14.157896 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rtnpd" podUID="54b2c6a4-d68f-4e1d-a686-626abdb6e127" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 13 06:32:14 crc kubenswrapper[5048]: W1213 06:32:14.567910 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod226b24e2_92c6_43d1_a621_09702ffa8fd4.slice/crio-aafa68ca3b54fab9235c8b362ed941bf09879e4d2f7232bcb6545d7fdb390a65 WatchSource:0}: Error finding container aafa68ca3b54fab9235c8b362ed941bf09879e4d2f7232bcb6545d7fdb390a65: Status 404 returned error can't find the container with id aafa68ca3b54fab9235c8b362ed941bf09879e4d2f7232bcb6545d7fdb390a65 Dec 13 06:32:15 crc kubenswrapper[5048]: I1213 06:32:15.154838 5048 patch_prober.go:28] interesting pod/router-default-5444994796-rtnpd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 13 06:32:15 crc kubenswrapper[5048]: [-]has-synced failed: reason withheld Dec 13 06:32:15 crc kubenswrapper[5048]: [+]process-running ok Dec 13 06:32:15 crc kubenswrapper[5048]: healthz check failed Dec 13 06:32:15 crc kubenswrapper[5048]: I1213 06:32:15.154946 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rtnpd" podUID="54b2c6a4-d68f-4e1d-a686-626abdb6e127" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 13 06:32:15 crc kubenswrapper[5048]: I1213 06:32:15.573062 5048 generic.go:334] "Generic (PLEG): container finished" podID="9fb25d1d-3347-47fc-aae0-2fb03a1ad397" containerID="167ac7fb2504a655d58973510f8013c556882f4af08bd9fcb95ee6834a83a258" exitCode=0 Dec 13 06:32:15 crc kubenswrapper[5048]: I1213 06:32:15.573104 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"9fb25d1d-3347-47fc-aae0-2fb03a1ad397","Type":"ContainerDied","Data":"167ac7fb2504a655d58973510f8013c556882f4af08bd9fcb95ee6834a83a258"} Dec 13 06:32:15 crc kubenswrapper[5048]: I1213 
06:32:15.575113 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-tm62z" event={"ID":"226b24e2-92c6-43d1-a621-09702ffa8fd4","Type":"ContainerStarted","Data":"aafa68ca3b54fab9235c8b362ed941bf09879e4d2f7232bcb6545d7fdb390a65"} Dec 13 06:32:16 crc kubenswrapper[5048]: I1213 06:32:16.169409 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-rtnpd" Dec 13 06:32:16 crc kubenswrapper[5048]: I1213 06:32:16.173723 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-rtnpd" Dec 13 06:32:16 crc kubenswrapper[5048]: I1213 06:32:16.215676 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 13 06:32:16 crc kubenswrapper[5048]: I1213 06:32:16.215735 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 13 06:32:19 crc kubenswrapper[5048]: I1213 06:32:19.389149 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-5nt4n" Dec 13 06:32:19 crc kubenswrapper[5048]: I1213 06:32:19.393228 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-5nt4n" Dec 13 06:32:20 crc kubenswrapper[5048]: I1213 06:32:20.365979 5048 patch_prober.go:28] interesting pod/downloads-7954f5f757-xclb7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" start-of-body= Dec 13 06:32:20 crc kubenswrapper[5048]: I1213 06:32:20.366047 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-xclb7" podUID="f567d62b-7941-466c-84c8-06f6854000ba" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" Dec 13 06:32:20 crc kubenswrapper[5048]: I1213 06:32:20.366671 5048 patch_prober.go:28] interesting pod/downloads-7954f5f757-xclb7 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" start-of-body= Dec 13 06:32:20 crc kubenswrapper[5048]: I1213 06:32:20.366691 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-xclb7" podUID="f567d62b-7941-466c-84c8-06f6854000ba" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" Dec 13 06:32:20 crc kubenswrapper[5048]: I1213 06:32:20.366760 5048 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-xclb7" Dec 13 06:32:20 crc kubenswrapper[5048]: I1213 06:32:20.367565 5048 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" 
containerStatusID={"Type":"cri-o","ID":"d129e8a65e094be058f84a723e5100bb31803a2ddd188b6b79731de4b5da44be"} pod="openshift-console/downloads-7954f5f757-xclb7" containerMessage="Container download-server failed liveness probe, will be restarted" Dec 13 06:32:20 crc kubenswrapper[5048]: I1213 06:32:20.367713 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-xclb7" podUID="f567d62b-7941-466c-84c8-06f6854000ba" containerName="download-server" containerID="cri-o://d129e8a65e094be058f84a723e5100bb31803a2ddd188b6b79731de4b5da44be" gracePeriod=2 Dec 13 06:32:20 crc kubenswrapper[5048]: I1213 06:32:20.368173 5048 patch_prober.go:28] interesting pod/downloads-7954f5f757-xclb7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" start-of-body= Dec 13 06:32:20 crc kubenswrapper[5048]: I1213 06:32:20.368220 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-xclb7" podUID="f567d62b-7941-466c-84c8-06f6854000ba" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" Dec 13 06:32:21 crc kubenswrapper[5048]: I1213 06:32:21.423899 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-qcdrc"] Dec 13 06:32:21 crc kubenswrapper[5048]: I1213 06:32:21.424528 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-qcdrc" podUID="6567f364-4ab2-489b-837f-1e7c194f311d" containerName="controller-manager" containerID="cri-o://940dfe480ca8d9d07ac51c5b1f83efb6c9ec2af812cc6942e7b653db1597b22b" gracePeriod=30 Dec 13 06:32:21 crc kubenswrapper[5048]: I1213 06:32:21.431034 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-lzrgn"] Dec 13 06:32:21 crc kubenswrapper[5048]: I1213 06:32:21.431273 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lzrgn" podUID="2754f9a4-6375-4864-9d69-7674e3dfe490" containerName="route-controller-manager" containerID="cri-o://37e4dd9d4e5b70d1b371ddbd76600fcb6033b8bbeb0e279746791dfc13cc199a" gracePeriod=30 Dec 13 06:32:22 crc kubenswrapper[5048]: I1213 06:32:22.654318 5048 generic.go:334] "Generic (PLEG): container finished" podID="f567d62b-7941-466c-84c8-06f6854000ba" containerID="d129e8a65e094be058f84a723e5100bb31803a2ddd188b6b79731de4b5da44be" exitCode=0 Dec 13 06:32:22 crc kubenswrapper[5048]: I1213 06:32:22.654360 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-xclb7" event={"ID":"f567d62b-7941-466c-84c8-06f6854000ba","Type":"ContainerDied","Data":"d129e8a65e094be058f84a723e5100bb31803a2ddd188b6b79731de4b5da44be"} Dec 13 06:32:23 crc kubenswrapper[5048]: I1213 06:32:23.661264 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-cluster-samples-operator_cluster-samples-operator-665b6dd947-5qgj6_493b8318-26e3-4f4e-b5d1-e1b2fd57de35/cluster-samples-operator/0.log" Dec 13 06:32:23 crc kubenswrapper[5048]: I1213 06:32:23.661553 5048 generic.go:334] "Generic (PLEG): container finished" podID="493b8318-26e3-4f4e-b5d1-e1b2fd57de35" 
containerID="d34d935c6442ac92896bce536d3bca0190126488f6f8c4b8f7708d3138de33ee" exitCode=2 Dec 13 06:32:23 crc kubenswrapper[5048]: I1213 06:32:23.661601 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5qgj6" event={"ID":"493b8318-26e3-4f4e-b5d1-e1b2fd57de35","Type":"ContainerDied","Data":"d34d935c6442ac92896bce536d3bca0190126488f6f8c4b8f7708d3138de33ee"} Dec 13 06:32:23 crc kubenswrapper[5048]: I1213 06:32:23.662081 5048 scope.go:117] "RemoveContainer" containerID="d34d935c6442ac92896bce536d3bca0190126488f6f8c4b8f7708d3138de33ee" Dec 13 06:32:23 crc kubenswrapper[5048]: I1213 06:32:23.664252 5048 generic.go:334] "Generic (PLEG): container finished" podID="2754f9a4-6375-4864-9d69-7674e3dfe490" containerID="37e4dd9d4e5b70d1b371ddbd76600fcb6033b8bbeb0e279746791dfc13cc199a" exitCode=0 Dec 13 06:32:23 crc kubenswrapper[5048]: I1213 06:32:23.664299 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lzrgn" event={"ID":"2754f9a4-6375-4864-9d69-7674e3dfe490","Type":"ContainerDied","Data":"37e4dd9d4e5b70d1b371ddbd76600fcb6033b8bbeb0e279746791dfc13cc199a"} Dec 13 06:32:23 crc kubenswrapper[5048]: I1213 06:32:23.666379 5048 generic.go:334] "Generic (PLEG): container finished" podID="6567f364-4ab2-489b-837f-1e7c194f311d" containerID="940dfe480ca8d9d07ac51c5b1f83efb6c9ec2af812cc6942e7b653db1597b22b" exitCode=0 Dec 13 06:32:23 crc kubenswrapper[5048]: I1213 06:32:23.666412 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-qcdrc" event={"ID":"6567f364-4ab2-489b-837f-1e7c194f311d","Type":"ContainerDied","Data":"940dfe480ca8d9d07ac51c5b1f83efb6c9ec2af812cc6942e7b653db1597b22b"} Dec 13 06:32:24 crc kubenswrapper[5048]: I1213 06:32:24.336796 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:32:28 crc kubenswrapper[5048]: I1213 06:32:28.976509 5048 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-qcdrc container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/healthz\": dial tcp 10.217.0.12:8443: connect: connection refused" start-of-body= Dec 13 06:32:28 crc kubenswrapper[5048]: I1213 06:32:28.976956 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-qcdrc" podUID="6567f364-4ab2-489b-837f-1e7c194f311d" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.12:8443/healthz\": dial tcp 10.217.0.12:8443: connect: connection refused" Dec 13 06:32:30 crc kubenswrapper[5048]: I1213 06:32:30.353114 5048 patch_prober.go:28] interesting pod/downloads-7954f5f757-xclb7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" start-of-body= Dec 13 06:32:30 crc kubenswrapper[5048]: I1213 06:32:30.353219 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-xclb7" podUID="f567d62b-7941-466c-84c8-06f6854000ba" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" Dec 13 06:32:30 crc kubenswrapper[5048]: I1213 06:32:30.986770 5048 
patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-lzrgn container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.28:8443/healthz\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body= Dec 13 06:32:30 crc kubenswrapper[5048]: I1213 06:32:30.987125 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lzrgn" podUID="2754f9a4-6375-4864-9d69-7674e3dfe490" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.28:8443/healthz\": dial tcp 10.217.0.28:8443: connect: connection refused" Dec 13 06:32:31 crc kubenswrapper[5048]: I1213 06:32:31.274277 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-28jpz" Dec 13 06:32:36 crc kubenswrapper[5048]: I1213 06:32:36.605309 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 13 06:32:38 crc kubenswrapper[5048]: I1213 06:32:38.975579 5048 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-qcdrc container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/healthz\": dial tcp 10.217.0.12:8443: connect: connection refused" start-of-body= Dec 13 06:32:38 crc kubenswrapper[5048]: I1213 06:32:38.975677 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-qcdrc" podUID="6567f364-4ab2-489b-837f-1e7c194f311d" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.12:8443/healthz\": dial tcp 10.217.0.12:8443: connect: connection refused" Dec 13 06:32:40 crc kubenswrapper[5048]: I1213 06:32:40.351975 5048 patch_prober.go:28] interesting pod/downloads-7954f5f757-xclb7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" start-of-body= Dec 13 06:32:40 crc kubenswrapper[5048]: I1213 06:32:40.352075 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-xclb7" podUID="f567d62b-7941-466c-84c8-06f6854000ba" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" Dec 13 06:32:40 crc kubenswrapper[5048]: I1213 06:32:40.987159 5048 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-lzrgn container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.28:8443/healthz\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body= Dec 13 06:32:40 crc kubenswrapper[5048]: I1213 06:32:40.987715 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lzrgn" podUID="2754f9a4-6375-4864-9d69-7674e3dfe490" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.28:8443/healthz\": dial tcp 10.217.0.28:8443: connect: connection refused" Dec 13 06:32:44 crc kubenswrapper[5048]: I1213 06:32:44.014287 5048 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Dec 13 06:32:44 crc kubenswrapper[5048]: E1213 06:32:44.014734 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a458fdb3-0662-44a0-8df6-b81dcb66a669" containerName="collect-profiles" Dec 13 06:32:44 crc kubenswrapper[5048]: I1213 06:32:44.014752 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="a458fdb3-0662-44a0-8df6-b81dcb66a669" containerName="collect-profiles" Dec 13 06:32:44 crc kubenswrapper[5048]: E1213 06:32:44.014764 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43b8b15b-e50d-42e2-ae56-87a3f2aa69df" containerName="pruner" Dec 13 06:32:44 crc kubenswrapper[5048]: I1213 06:32:44.014773 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="43b8b15b-e50d-42e2-ae56-87a3f2aa69df" containerName="pruner" Dec 13 06:32:44 crc kubenswrapper[5048]: I1213 06:32:44.014903 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="43b8b15b-e50d-42e2-ae56-87a3f2aa69df" containerName="pruner" Dec 13 06:32:44 crc kubenswrapper[5048]: I1213 06:32:44.014924 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="a458fdb3-0662-44a0-8df6-b81dcb66a669" containerName="collect-profiles" Dec 13 06:32:44 crc kubenswrapper[5048]: I1213 06:32:44.015378 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 13 06:32:44 crc kubenswrapper[5048]: I1213 06:32:44.027188 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Dec 13 06:32:44 crc kubenswrapper[5048]: I1213 06:32:44.210743 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/63a58dec-0fcf-4425-820a-19f710e3d0d3-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"63a58dec-0fcf-4425-820a-19f710e3d0d3\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 13 06:32:44 crc kubenswrapper[5048]: I1213 06:32:44.210838 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/63a58dec-0fcf-4425-820a-19f710e3d0d3-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"63a58dec-0fcf-4425-820a-19f710e3d0d3\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 13 06:32:44 crc kubenswrapper[5048]: I1213 06:32:44.311759 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/63a58dec-0fcf-4425-820a-19f710e3d0d3-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"63a58dec-0fcf-4425-820a-19f710e3d0d3\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 13 06:32:44 crc kubenswrapper[5048]: I1213 06:32:44.311860 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/63a58dec-0fcf-4425-820a-19f710e3d0d3-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"63a58dec-0fcf-4425-820a-19f710e3d0d3\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 13 06:32:44 crc kubenswrapper[5048]: I1213 06:32:44.311954 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/63a58dec-0fcf-4425-820a-19f710e3d0d3-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"63a58dec-0fcf-4425-820a-19f710e3d0d3\") " 
pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 13 06:32:44 crc kubenswrapper[5048]: I1213 06:32:44.336934 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/63a58dec-0fcf-4425-820a-19f710e3d0d3-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"63a58dec-0fcf-4425-820a-19f710e3d0d3\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 13 06:32:44 crc kubenswrapper[5048]: I1213 06:32:44.355302 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 13 06:32:46 crc kubenswrapper[5048]: I1213 06:32:46.216071 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 13 06:32:46 crc kubenswrapper[5048]: I1213 06:32:46.216173 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 13 06:32:48 crc kubenswrapper[5048]: I1213 06:32:48.975403 5048 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-qcdrc container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/healthz\": dial tcp 10.217.0.12:8443: connect: connection refused" start-of-body= Dec 13 06:32:48 crc kubenswrapper[5048]: I1213 06:32:48.975494 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-qcdrc" podUID="6567f364-4ab2-489b-837f-1e7c194f311d" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.12:8443/healthz\": dial tcp 10.217.0.12:8443: connect: connection refused" Dec 13 06:32:50 crc kubenswrapper[5048]: I1213 06:32:50.016893 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Dec 13 06:32:50 crc kubenswrapper[5048]: I1213 06:32:50.018528 5048 util.go:30] "No sandbox for pod can be found. 
Dec 13 06:32:50 crc kubenswrapper[5048]: I1213 06:32:50.033244 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Dec 13 06:32:50 crc kubenswrapper[5048]: I1213 06:32:50.195381 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/268d907d-1730-456e-8e52-67e58aca607b-kubelet-dir\") pod \"installer-9-crc\" (UID: \"268d907d-1730-456e-8e52-67e58aca607b\") " pod="openshift-kube-apiserver/installer-9-crc"
Dec 13 06:32:50 crc kubenswrapper[5048]: I1213 06:32:50.195497 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/268d907d-1730-456e-8e52-67e58aca607b-kube-api-access\") pod \"installer-9-crc\" (UID: \"268d907d-1730-456e-8e52-67e58aca607b\") " pod="openshift-kube-apiserver/installer-9-crc"
Dec 13 06:32:50 crc kubenswrapper[5048]: I1213 06:32:50.195567 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/268d907d-1730-456e-8e52-67e58aca607b-var-lock\") pod \"installer-9-crc\" (UID: \"268d907d-1730-456e-8e52-67e58aca607b\") " pod="openshift-kube-apiserver/installer-9-crc"
Dec 13 06:32:50 crc kubenswrapper[5048]: I1213 06:32:50.297425 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/268d907d-1730-456e-8e52-67e58aca607b-kube-api-access\") pod \"installer-9-crc\" (UID: \"268d907d-1730-456e-8e52-67e58aca607b\") " pod="openshift-kube-apiserver/installer-9-crc"
Dec 13 06:32:50 crc kubenswrapper[5048]: I1213 06:32:50.297510 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/268d907d-1730-456e-8e52-67e58aca607b-var-lock\") pod \"installer-9-crc\" (UID: \"268d907d-1730-456e-8e52-67e58aca607b\") " pod="openshift-kube-apiserver/installer-9-crc"
Dec 13 06:32:50 crc kubenswrapper[5048]: I1213 06:32:50.297600 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/268d907d-1730-456e-8e52-67e58aca607b-kubelet-dir\") pod \"installer-9-crc\" (UID: \"268d907d-1730-456e-8e52-67e58aca607b\") " pod="openshift-kube-apiserver/installer-9-crc"
Dec 13 06:32:50 crc kubenswrapper[5048]: I1213 06:32:50.297678 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/268d907d-1730-456e-8e52-67e58aca607b-kubelet-dir\") pod \"installer-9-crc\" (UID: \"268d907d-1730-456e-8e52-67e58aca607b\") " pod="openshift-kube-apiserver/installer-9-crc"
Dec 13 06:32:50 crc kubenswrapper[5048]: I1213 06:32:50.297953 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/268d907d-1730-456e-8e52-67e58aca607b-var-lock\") pod \"installer-9-crc\" (UID: \"268d907d-1730-456e-8e52-67e58aca607b\") " pod="openshift-kube-apiserver/installer-9-crc"
Dec 13 06:32:50 crc kubenswrapper[5048]: I1213 06:32:50.320129 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/268d907d-1730-456e-8e52-67e58aca607b-kube-api-access\") pod \"installer-9-crc\" (UID: \"268d907d-1730-456e-8e52-67e58aca607b\") " pod="openshift-kube-apiserver/installer-9-crc"
Dec 13 06:32:50 crc kubenswrapper[5048]: I1213 06:32:50.353285 5048 patch_prober.go:28] interesting pod/downloads-7954f5f757-xclb7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" start-of-body=
Dec 13 06:32:50 crc kubenswrapper[5048]: I1213 06:32:50.353359 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-xclb7" podUID="f567d62b-7941-466c-84c8-06f6854000ba" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused"
Dec 13 06:32:50 crc kubenswrapper[5048]: I1213 06:32:50.392576 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Dec 13 06:32:50 crc kubenswrapper[5048]: I1213 06:32:50.988547 5048 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-lzrgn container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.28:8443/healthz\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body=
Dec 13 06:32:50 crc kubenswrapper[5048]: I1213 06:32:50.988964 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lzrgn" podUID="2754f9a4-6375-4864-9d69-7674e3dfe490" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.28:8443/healthz\": dial tcp 10.217.0.28:8443: connect: connection refused"
Dec 13 06:32:52 crc kubenswrapper[5048]: I1213 06:32:52.258207 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Dec 13 06:32:52 crc kubenswrapper[5048]: I1213 06:32:52.329916 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9fb25d1d-3347-47fc-aae0-2fb03a1ad397-kube-api-access\") pod \"9fb25d1d-3347-47fc-aae0-2fb03a1ad397\" (UID: \"9fb25d1d-3347-47fc-aae0-2fb03a1ad397\") "
Dec 13 06:32:52 crc kubenswrapper[5048]: I1213 06:32:52.330111 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9fb25d1d-3347-47fc-aae0-2fb03a1ad397-kubelet-dir\") pod \"9fb25d1d-3347-47fc-aae0-2fb03a1ad397\" (UID: \"9fb25d1d-3347-47fc-aae0-2fb03a1ad397\") "
Dec 13 06:32:52 crc kubenswrapper[5048]: I1213 06:32:52.330169 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9fb25d1d-3347-47fc-aae0-2fb03a1ad397-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "9fb25d1d-3347-47fc-aae0-2fb03a1ad397" (UID: "9fb25d1d-3347-47fc-aae0-2fb03a1ad397"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 13 06:32:52 crc kubenswrapper[5048]: I1213 06:32:52.331231 5048 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9fb25d1d-3347-47fc-aae0-2fb03a1ad397-kubelet-dir\") on node \"crc\" DevicePath \"\""
Dec 13 06:32:52 crc kubenswrapper[5048]: I1213 06:32:52.333155 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9fb25d1d-3347-47fc-aae0-2fb03a1ad397-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "9fb25d1d-3347-47fc-aae0-2fb03a1ad397" (UID: "9fb25d1d-3347-47fc-aae0-2fb03a1ad397"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:32:52 crc kubenswrapper[5048]: I1213 06:32:52.432793 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9fb25d1d-3347-47fc-aae0-2fb03a1ad397-kube-api-access\") on node \"crc\" DevicePath \"\""
Dec 13 06:32:52 crc kubenswrapper[5048]: I1213 06:32:52.944062 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"9fb25d1d-3347-47fc-aae0-2fb03a1ad397","Type":"ContainerDied","Data":"8931031491d25568e12002c034c52f6d97c58f271b8e1c1a3e4788bfa6abc947"}
Dec 13 06:32:52 crc kubenswrapper[5048]: I1213 06:32:52.944100 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8931031491d25568e12002c034c52f6d97c58f271b8e1c1a3e4788bfa6abc947"
Dec 13 06:32:52 crc kubenswrapper[5048]: I1213 06:32:52.944139 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Dec 13 06:32:59 crc kubenswrapper[5048]: I1213 06:32:59.976136 5048 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-qcdrc container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Dec 13 06:32:59 crc kubenswrapper[5048]: I1213 06:32:59.976896 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-qcdrc" podUID="6567f364-4ab2-489b-837f-1e7c194f311d" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.12:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Dec 13 06:33:00 crc kubenswrapper[5048]: I1213 06:33:00.352134 5048 patch_prober.go:28] interesting pod/downloads-7954f5f757-xclb7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" start-of-body=
Dec 13 06:33:00 crc kubenswrapper[5048]: I1213 06:33:00.352184 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-xclb7" podUID="f567d62b-7941-466c-84c8-06f6854000ba" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused"
Dec 13 06:33:01 crc kubenswrapper[5048]: I1213 06:33:01.986313 5048 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-lzrgn container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.28:8443/healthz\": dial tcp 10.217.0.28:8443: i/o timeout" start-of-body=
Dec 13 06:33:01 crc kubenswrapper[5048]: I1213 06:33:01.986710 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lzrgn" podUID="2754f9a4-6375-4864-9d69-7674e3dfe490" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.28:8443/healthz\": dial tcp 10.217.0.28:8443: i/o timeout"
Dec 13 06:33:06 crc kubenswrapper[5048]: E1213 06:33:06.006701 5048 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: writing blob: storing blob to file \"/var/tmp/container_images_storage4178038258/2\": happened during read: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18"
Dec 13 06:33:06 crc kubenswrapper[5048]: E1213 06:33:06.007770 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4c4wp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-dmqg7_openshift-marketplace(16d5a4f5-03a1-46f1-990b-f69dc88f1e9d): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: writing blob: storing blob to file \"/var/tmp/container_images_storage4178038258/2\": happened during read: context canceled" logger="UnhandledError"
Dec 13 06:33:06 crc kubenswrapper[5048]: E1213 06:33:06.009154 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: writing blob: storing blob to file \\\"/var/tmp/container_images_storage4178038258/2\\\": happened during read: context canceled\"" pod="openshift-marketplace/redhat-marketplace-dmqg7" podUID="16d5a4f5-03a1-46f1-990b-f69dc88f1e9d"
Dec 13 06:33:06 crc kubenswrapper[5048]: E1213 06:33:06.084733 5048 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18"
Dec 13 06:33:06 crc kubenswrapper[5048]: E1213 06:33:06.084870 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-895rz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-k9jhn_openshift-marketplace(87790266-395a-4687-895f-f524c8b2171b): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Dec 13 06:33:06 crc kubenswrapper[5048]: E1213 06:33:06.086086 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-k9jhn" podUID="87790266-395a-4687-895f-f524c8b2171b"
Dec 13 06:33:07 crc kubenswrapper[5048]: E1213 06:33:07.767457 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-k9jhn" podUID="87790266-395a-4687-895f-f524c8b2171b"
Dec 13 06:33:07 crc kubenswrapper[5048]: E1213 06:33:07.830220 5048 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18"
Dec 13 06:33:07 crc kubenswrapper[5048]: E1213 06:33:07.830366 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-vvqcq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-l9l5s_openshift-marketplace(6885bcbf-dd86-4e5a-bd21-92395c5ae676): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Dec 13 06:33:07 crc kubenswrapper[5048]: E1213 06:33:07.831705 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-l9l5s" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676"
Dec 13 06:33:07 crc kubenswrapper[5048]: E1213 06:33:07.843134 5048 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18"
Dec 13 06:33:07 crc kubenswrapper[5048]: E1213 06:33:07.843354 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-r8r72,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-vfwpt_openshift-marketplace(07531c82-87d1-409f-9c5a-4910633b5786): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Dec 13 06:33:07 crc kubenswrapper[5048]: E1213 06:33:07.845328 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-vfwpt" podUID="07531c82-87d1-409f-9c5a-4910633b5786"
Dec 13 06:33:07 crc kubenswrapper[5048]: E1213 06:33:07.848223 5048 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18"
Dec 13 06:33:07 crc kubenswrapper[5048]: E1213 06:33:07.848332 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6bz7t,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-q8ltl_openshift-marketplace(f7b93b87-31d6-4279-8ac9-b834417f66d9): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Dec 13 06:33:07 crc kubenswrapper[5048]: E1213 06:33:07.849787 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-q8ltl" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9"
Dec 13 06:33:07 crc kubenswrapper[5048]: I1213 06:33:07.857173 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lzrgn"
Dec 13 06:33:07 crc kubenswrapper[5048]: I1213 06:33:07.863876 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-qcdrc"
Dec 13 06:33:07 crc kubenswrapper[5048]: I1213 06:33:07.891065 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-99ff448dc-2ckpz"]
Dec 13 06:33:07 crc kubenswrapper[5048]: E1213 06:33:07.891275 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9fb25d1d-3347-47fc-aae0-2fb03a1ad397" containerName="pruner"
Dec 13 06:33:07 crc kubenswrapper[5048]: I1213 06:33:07.891287 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="9fb25d1d-3347-47fc-aae0-2fb03a1ad397" containerName="pruner"
Dec 13 06:33:07 crc kubenswrapper[5048]: E1213 06:33:07.891299 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6567f364-4ab2-489b-837f-1e7c194f311d" containerName="controller-manager"
Dec 13 06:33:07 crc kubenswrapper[5048]: I1213 06:33:07.891305 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="6567f364-4ab2-489b-837f-1e7c194f311d" containerName="controller-manager"
Dec 13 06:33:07 crc kubenswrapper[5048]: E1213 06:33:07.891320 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2754f9a4-6375-4864-9d69-7674e3dfe490" containerName="route-controller-manager"
Dec 13 06:33:07 crc kubenswrapper[5048]: I1213 06:33:07.891326 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="2754f9a4-6375-4864-9d69-7674e3dfe490" containerName="route-controller-manager"
Dec 13 06:33:07 crc kubenswrapper[5048]: I1213 06:33:07.891423 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="6567f364-4ab2-489b-837f-1e7c194f311d" containerName="controller-manager"
Dec 13 06:33:07 crc kubenswrapper[5048]: I1213 06:33:07.891455 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="9fb25d1d-3347-47fc-aae0-2fb03a1ad397" containerName="pruner"
Dec 13 06:33:07 crc kubenswrapper[5048]: I1213 06:33:07.891468 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="2754f9a4-6375-4864-9d69-7674e3dfe490" containerName="route-controller-manager"
Dec 13 06:33:07 crc kubenswrapper[5048]: I1213 06:33:07.891807 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-99ff448dc-2ckpz"
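The cpu_manager.go, state_mem.go and memory_manager.go records above show stale-state cleanup at pod admission: when the new route-controller-manager pod is added, assignments keyed by pod UID and container name whose pods are gone are dropped, so CPU and memory bookkeeping does not leak across rollouts. A Go sketch of that pruning pattern; the types and names are hypothetical, not the kubelet's internal ones.

    // stalestate.go - sketch of the RemoveStaleState pruning pattern.
    package main

    import "fmt"

    type key struct{ podUID, container string }

    func removeStaleState(assignments map[key]string, activePods map[string]bool) {
            for k := range assignments {
                    if !activePods[k.podUID] {
                            fmt.Printf("RemoveStaleState: removing container podUID=%q containerName=%q\n",
                                    k.podUID, k.container)
                            delete(assignments, k) // deleting during range is safe in Go
                    }
            }
    }

    func main() {
            assignments := map[key]string{
                    {"9fb25d1d", "pruner"}:                    "cpus 0-1",
                    {"da283285", "route-controller-manager"}: "cpus 2-3",
            }
            // Only da283285 is still active; the pruner entry is stale.
            removeStaleState(assignments, map[string]bool{"da283285": true})
    }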
Dec 13 06:33:07 crc kubenswrapper[5048]: I1213 06:33:07.905448 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-99ff448dc-2ckpz"]
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.042972 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6567f364-4ab2-489b-837f-1e7c194f311d-client-ca\") pod \"6567f364-4ab2-489b-837f-1e7c194f311d\" (UID: \"6567f364-4ab2-489b-837f-1e7c194f311d\") "
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.043051 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2754f9a4-6375-4864-9d69-7674e3dfe490-client-ca\") pod \"2754f9a4-6375-4864-9d69-7674e3dfe490\" (UID: \"2754f9a4-6375-4864-9d69-7674e3dfe490\") "
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.043129 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x8q7r\" (UniqueName: \"kubernetes.io/projected/6567f364-4ab2-489b-837f-1e7c194f311d-kube-api-access-x8q7r\") pod \"6567f364-4ab2-489b-837f-1e7c194f311d\" (UID: \"6567f364-4ab2-489b-837f-1e7c194f311d\") "
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.043354 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6567f364-4ab2-489b-837f-1e7c194f311d-client-ca" (OuterVolumeSpecName: "client-ca") pod "6567f364-4ab2-489b-837f-1e7c194f311d" (UID: "6567f364-4ab2-489b-837f-1e7c194f311d"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.044024 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2754f9a4-6375-4864-9d69-7674e3dfe490-client-ca" (OuterVolumeSpecName: "client-ca") pod "2754f9a4-6375-4864-9d69-7674e3dfe490" (UID: "2754f9a4-6375-4864-9d69-7674e3dfe490"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.044168 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2754f9a4-6375-4864-9d69-7674e3dfe490-serving-cert\") pod \"2754f9a4-6375-4864-9d69-7674e3dfe490\" (UID: \"2754f9a4-6375-4864-9d69-7674e3dfe490\") "
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.044231 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6567f364-4ab2-489b-837f-1e7c194f311d-config\") pod \"6567f364-4ab2-489b-837f-1e7c194f311d\" (UID: \"6567f364-4ab2-489b-837f-1e7c194f311d\") "
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.044263 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p6skd\" (UniqueName: \"kubernetes.io/projected/2754f9a4-6375-4864-9d69-7674e3dfe490-kube-api-access-p6skd\") pod \"2754f9a4-6375-4864-9d69-7674e3dfe490\" (UID: \"2754f9a4-6375-4864-9d69-7674e3dfe490\") "
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.044297 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6567f364-4ab2-489b-837f-1e7c194f311d-proxy-ca-bundles\") pod \"6567f364-4ab2-489b-837f-1e7c194f311d\" (UID: \"6567f364-4ab2-489b-837f-1e7c194f311d\") "
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.044341 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2754f9a4-6375-4864-9d69-7674e3dfe490-config\") pod \"2754f9a4-6375-4864-9d69-7674e3dfe490\" (UID: \"2754f9a4-6375-4864-9d69-7674e3dfe490\") "
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.044369 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6567f364-4ab2-489b-837f-1e7c194f311d-serving-cert\") pod \"6567f364-4ab2-489b-837f-1e7c194f311d\" (UID: \"6567f364-4ab2-489b-837f-1e7c194f311d\") "
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.045156 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6567f364-4ab2-489b-837f-1e7c194f311d-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "6567f364-4ab2-489b-837f-1e7c194f311d" (UID: "6567f364-4ab2-489b-837f-1e7c194f311d"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.045287 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6567f364-4ab2-489b-837f-1e7c194f311d-config" (OuterVolumeSpecName: "config") pod "6567f364-4ab2-489b-837f-1e7c194f311d" (UID: "6567f364-4ab2-489b-837f-1e7c194f311d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.045563 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2754f9a4-6375-4864-9d69-7674e3dfe490-config" (OuterVolumeSpecName: "config") pod "2754f9a4-6375-4864-9d69-7674e3dfe490" (UID: "2754f9a4-6375-4864-9d69-7674e3dfe490"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.045588 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da283285-f622-4286-be22-0c3e4d244cd1-config\") pod \"route-controller-manager-99ff448dc-2ckpz\" (UID: \"da283285-f622-4286-be22-0c3e4d244cd1\") " pod="openshift-route-controller-manager/route-controller-manager-99ff448dc-2ckpz"
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.045668 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/da283285-f622-4286-be22-0c3e4d244cd1-client-ca\") pod \"route-controller-manager-99ff448dc-2ckpz\" (UID: \"da283285-f622-4286-be22-0c3e4d244cd1\") " pod="openshift-route-controller-manager/route-controller-manager-99ff448dc-2ckpz"
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.045728 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/da283285-f622-4286-be22-0c3e4d244cd1-serving-cert\") pod \"route-controller-manager-99ff448dc-2ckpz\" (UID: \"da283285-f622-4286-be22-0c3e4d244cd1\") " pod="openshift-route-controller-manager/route-controller-manager-99ff448dc-2ckpz"
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.045752 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mbkfl\" (UniqueName: \"kubernetes.io/projected/da283285-f622-4286-be22-0c3e4d244cd1-kube-api-access-mbkfl\") pod \"route-controller-manager-99ff448dc-2ckpz\" (UID: \"da283285-f622-4286-be22-0c3e4d244cd1\") " pod="openshift-route-controller-manager/route-controller-manager-99ff448dc-2ckpz"
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.045866 5048 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6567f364-4ab2-489b-837f-1e7c194f311d-client-ca\") on node \"crc\" DevicePath \"\""
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.045916 5048 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2754f9a4-6375-4864-9d69-7674e3dfe490-client-ca\") on node \"crc\" DevicePath \"\""
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.045930 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6567f364-4ab2-489b-837f-1e7c194f311d-config\") on node \"crc\" DevicePath \"\""
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.045987 5048 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6567f364-4ab2-489b-837f-1e7c194f311d-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.046003 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2754f9a4-6375-4864-9d69-7674e3dfe490-config\") on node \"crc\" DevicePath \"\""
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.048120 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lzrgn" event={"ID":"2754f9a4-6375-4864-9d69-7674e3dfe490","Type":"ContainerDied","Data":"58d12ee173e188ce135ebf858a55523a9dd1de59bfb436270b14b5c2f7e655da"}
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.048172 5048 scope.go:117] "RemoveContainer" containerID="37e4dd9d4e5b70d1b371ddbd76600fcb6033b8bbeb0e279746791dfc13cc199a"
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.048289 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lzrgn"
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.050111 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2754f9a4-6375-4864-9d69-7674e3dfe490-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "2754f9a4-6375-4864-9d69-7674e3dfe490" (UID: "2754f9a4-6375-4864-9d69-7674e3dfe490"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.050283 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2754f9a4-6375-4864-9d69-7674e3dfe490-kube-api-access-p6skd" (OuterVolumeSpecName: "kube-api-access-p6skd") pod "2754f9a4-6375-4864-9d69-7674e3dfe490" (UID: "2754f9a4-6375-4864-9d69-7674e3dfe490"). InnerVolumeSpecName "kube-api-access-p6skd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.053678 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-qcdrc" event={"ID":"6567f364-4ab2-489b-837f-1e7c194f311d","Type":"ContainerDied","Data":"04ec043e47c1dbba24ca669cd1953332691573bfade510dcbfe974df9f38e22e"}
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.053793 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-qcdrc"
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.053957 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6567f364-4ab2-489b-837f-1e7c194f311d-kube-api-access-x8q7r" (OuterVolumeSpecName: "kube-api-access-x8q7r") pod "6567f364-4ab2-489b-837f-1e7c194f311d" (UID: "6567f364-4ab2-489b-837f-1e7c194f311d"). InnerVolumeSpecName "kube-api-access-x8q7r". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.054182 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6567f364-4ab2-489b-837f-1e7c194f311d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6567f364-4ab2-489b-837f-1e7c194f311d" (UID: "6567f364-4ab2-489b-837f-1e7c194f311d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.147392 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/da283285-f622-4286-be22-0c3e4d244cd1-client-ca\") pod \"route-controller-manager-99ff448dc-2ckpz\" (UID: \"da283285-f622-4286-be22-0c3e4d244cd1\") " pod="openshift-route-controller-manager/route-controller-manager-99ff448dc-2ckpz"
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.147797 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/da283285-f622-4286-be22-0c3e4d244cd1-serving-cert\") pod \"route-controller-manager-99ff448dc-2ckpz\" (UID: \"da283285-f622-4286-be22-0c3e4d244cd1\") " pod="openshift-route-controller-manager/route-controller-manager-99ff448dc-2ckpz"
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.147822 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mbkfl\" (UniqueName: \"kubernetes.io/projected/da283285-f622-4286-be22-0c3e4d244cd1-kube-api-access-mbkfl\") pod \"route-controller-manager-99ff448dc-2ckpz\" (UID: \"da283285-f622-4286-be22-0c3e4d244cd1\") " pod="openshift-route-controller-manager/route-controller-manager-99ff448dc-2ckpz"
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.147875 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da283285-f622-4286-be22-0c3e4d244cd1-config\") pod \"route-controller-manager-99ff448dc-2ckpz\" (UID: \"da283285-f622-4286-be22-0c3e4d244cd1\") " pod="openshift-route-controller-manager/route-controller-manager-99ff448dc-2ckpz"
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.147924 5048 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6567f364-4ab2-489b-837f-1e7c194f311d-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.147936 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x8q7r\" (UniqueName: \"kubernetes.io/projected/6567f364-4ab2-489b-837f-1e7c194f311d-kube-api-access-x8q7r\") on node \"crc\" DevicePath \"\""
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.147946 5048 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2754f9a4-6375-4864-9d69-7674e3dfe490-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.147954 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p6skd\" (UniqueName: \"kubernetes.io/projected/2754f9a4-6375-4864-9d69-7674e3dfe490-kube-api-access-p6skd\") on node \"crc\" DevicePath \"\""
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.148972 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/da283285-f622-4286-be22-0c3e4d244cd1-client-ca\") pod \"route-controller-manager-99ff448dc-2ckpz\" (UID: \"da283285-f622-4286-be22-0c3e4d244cd1\") " pod="openshift-route-controller-manager/route-controller-manager-99ff448dc-2ckpz"
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.149019 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da283285-f622-4286-be22-0c3e4d244cd1-config\") pod \"route-controller-manager-99ff448dc-2ckpz\" (UID: \"da283285-f622-4286-be22-0c3e4d244cd1\") " pod="openshift-route-controller-manager/route-controller-manager-99ff448dc-2ckpz"
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.159412 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/da283285-f622-4286-be22-0c3e4d244cd1-serving-cert\") pod \"route-controller-manager-99ff448dc-2ckpz\" (UID: \"da283285-f622-4286-be22-0c3e4d244cd1\") " pod="openshift-route-controller-manager/route-controller-manager-99ff448dc-2ckpz"
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.165076 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mbkfl\" (UniqueName: \"kubernetes.io/projected/da283285-f622-4286-be22-0c3e4d244cd1-kube-api-access-mbkfl\") pod \"route-controller-manager-99ff448dc-2ckpz\" (UID: \"da283285-f622-4286-be22-0c3e4d244cd1\") " pod="openshift-route-controller-manager/route-controller-manager-99ff448dc-2ckpz"
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.207052 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-99ff448dc-2ckpz"
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.377640 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-lzrgn"]
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.380773 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-lzrgn"]
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.392227 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-qcdrc"]
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.396073 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-qcdrc"]
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.574299 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2754f9a4-6375-4864-9d69-7674e3dfe490" path="/var/lib/kubelet/pods/2754f9a4-6375-4864-9d69-7674e3dfe490/volumes"
Dec 13 06:33:08 crc kubenswrapper[5048]: I1213 06:33:08.575381 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6567f364-4ab2-489b-837f-1e7c194f311d" path="/var/lib/kubelet/pods/6567f364-4ab2-489b-837f-1e7c194f311d/volumes"
Dec 13 06:33:10 crc kubenswrapper[5048]: E1213 06:33:10.120512 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-q8ltl" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9"
Dec 13 06:33:10 crc kubenswrapper[5048]: E1213 06:33:10.120532 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-l9l5s" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676"
Dec 13 06:33:10 crc kubenswrapper[5048]: E1213 06:33:10.290697 5048 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18"
canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Dec 13 06:33:10 crc kubenswrapper[5048]: E1213 06:33:10.290843 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-cksfn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-lqvmp_openshift-marketplace(57717114-1abd-46bf-bdbd-0a785d734cd3): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 13 06:33:10 crc kubenswrapper[5048]: E1213 06:33:10.292374 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-lqvmp" podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" Dec 13 06:33:10 crc kubenswrapper[5048]: I1213 06:33:10.352337 5048 patch_prober.go:28] interesting pod/downloads-7954f5f757-xclb7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" start-of-body= Dec 13 06:33:10 crc kubenswrapper[5048]: I1213 06:33:10.352407 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-xclb7" podUID="f567d62b-7941-466c-84c8-06f6854000ba" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" Dec 13 06:33:10 crc kubenswrapper[5048]: E1213 06:33:10.454812 5048 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Dec 13 06:33:10 crc kubenswrapper[5048]: E1213 06:33:10.454989 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-cwtbg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-hjls5_openshift-marketplace(1ea76a75-e8de-4a91-89af-726df36e8a21): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 13 06:33:10 crc kubenswrapper[5048]: E1213 06:33:10.456607 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-hjls5" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" Dec 13 06:33:11 crc kubenswrapper[5048]: E1213 06:33:11.553221 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-hjls5" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" Dec 13 06:33:11 crc kubenswrapper[5048]: E1213 06:33:11.572764 5048 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Dec 13 06:33:11 crc kubenswrapper[5048]: E1213 06:33:11.572895 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kbc9l,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-rb7fl_openshift-marketplace(d5fb554d-84e3-4bf0-857f-a64da6e6a36f): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 13 06:33:11 crc kubenswrapper[5048]: E1213 06:33:11.574133 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-rb7fl" podUID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" Dec 13 06:33:11 crc kubenswrapper[5048]: I1213 06:33:11.584633 5048 scope.go:117] "RemoveContainer" containerID="940dfe480ca8d9d07ac51c5b1f83efb6c9ec2af812cc6942e7b653db1597b22b" Dec 13 06:33:11 crc kubenswrapper[5048]: I1213 06:33:11.831120 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Dec 13 06:33:11 crc kubenswrapper[5048]: I1213 06:33:11.877120 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-99ff448dc-2ckpz"] Dec 13 06:33:11 crc kubenswrapper[5048]: W1213 06:33:11.889662 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podda283285_f622_4286_be22_0c3e4d244cd1.slice/crio-38ad9017e2f5ca54aa86a25f606ee16747224a84fa410050010831fccc0ba2ec WatchSource:0}: Error finding container 38ad9017e2f5ca54aa86a25f606ee16747224a84fa410050010831fccc0ba2ec: Status 404 returned error can't find the container with id 38ad9017e2f5ca54aa86a25f606ee16747224a84fa410050010831fccc0ba2ec Dec 13 06:33:12 crc kubenswrapper[5048]: I1213 06:33:12.069634 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Dec 13 06:33:12 crc kubenswrapper[5048]: I1213 06:33:12.079556 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-cluster-samples-operator_cluster-samples-operator-665b6dd947-5qgj6_493b8318-26e3-4f4e-b5d1-e1b2fd57de35/cluster-samples-operator/0.log" Dec 13 06:33:12 crc kubenswrapper[5048]: I1213 
06:33:12.079647 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5qgj6" event={"ID":"493b8318-26e3-4f4e-b5d1-e1b2fd57de35","Type":"ContainerStarted","Data":"cb2f92adeeac2a042766c190504a7152d7d6d88bf2863cf0193f70652cf91650"} Dec 13 06:33:12 crc kubenswrapper[5048]: I1213 06:33:12.086837 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"63a58dec-0fcf-4425-820a-19f710e3d0d3","Type":"ContainerStarted","Data":"5678a6a06e9ad408e418d54d2136cffb741fba7f96effd4b06123e080d649c6d"} Dec 13 06:33:12 crc kubenswrapper[5048]: W1213 06:33:12.088150 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod268d907d_1730_456e_8e52_67e58aca607b.slice/crio-273cb1e4ab78e778a25e8c556d3664a544cce9de01598c9bd0215f8c39ce448b WatchSource:0}: Error finding container 273cb1e4ab78e778a25e8c556d3664a544cce9de01598c9bd0215f8c39ce448b: Status 404 returned error can't find the container with id 273cb1e4ab78e778a25e8c556d3664a544cce9de01598c9bd0215f8c39ce448b Dec 13 06:33:12 crc kubenswrapper[5048]: I1213 06:33:12.100771 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-tm62z" event={"ID":"226b24e2-92c6-43d1-a621-09702ffa8fd4","Type":"ContainerStarted","Data":"3957663247e9653273b366f805f1349df3c432481fa5791a8fd1afb6a864f397"} Dec 13 06:33:12 crc kubenswrapper[5048]: I1213 06:33:12.102134 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-99ff448dc-2ckpz" event={"ID":"da283285-f622-4286-be22-0c3e4d244cd1","Type":"ContainerStarted","Data":"38ad9017e2f5ca54aa86a25f606ee16747224a84fa410050010831fccc0ba2ec"} Dec 13 06:33:12 crc kubenswrapper[5048]: I1213 06:33:12.104114 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-xclb7" event={"ID":"f567d62b-7941-466c-84c8-06f6854000ba","Type":"ContainerStarted","Data":"712f10de44af8457d1a9292b56231692da999ab1be9a729964dcd67f306c9e47"} Dec 13 06:33:12 crc kubenswrapper[5048]: I1213 06:33:12.104923 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-xclb7" Dec 13 06:33:12 crc kubenswrapper[5048]: I1213 06:33:12.105320 5048 patch_prober.go:28] interesting pod/downloads-7954f5f757-xclb7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" start-of-body= Dec 13 06:33:12 crc kubenswrapper[5048]: I1213 06:33:12.105359 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-xclb7" podUID="f567d62b-7941-466c-84c8-06f6854000ba" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" Dec 13 06:33:12 crc kubenswrapper[5048]: I1213 06:33:12.505048 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-d49cf5dcf-6dfqg"] Dec 13 06:33:12 crc kubenswrapper[5048]: I1213 06:33:12.507986 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-d49cf5dcf-6dfqg" Dec 13 06:33:12 crc kubenswrapper[5048]: I1213 06:33:12.514101 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Dec 13 06:33:12 crc kubenswrapper[5048]: I1213 06:33:12.514815 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Dec 13 06:33:12 crc kubenswrapper[5048]: I1213 06:33:12.515167 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Dec 13 06:33:12 crc kubenswrapper[5048]: I1213 06:33:12.515508 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Dec 13 06:33:12 crc kubenswrapper[5048]: I1213 06:33:12.515500 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Dec 13 06:33:12 crc kubenswrapper[5048]: I1213 06:33:12.515564 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Dec 13 06:33:12 crc kubenswrapper[5048]: I1213 06:33:12.526707 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Dec 13 06:33:12 crc kubenswrapper[5048]: I1213 06:33:12.529948 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-d49cf5dcf-6dfqg"] Dec 13 06:33:12 crc kubenswrapper[5048]: I1213 06:33:12.606545 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1d68a6cf-5a1b-40bd-9502-a81d117ee6d2-proxy-ca-bundles\") pod \"controller-manager-d49cf5dcf-6dfqg\" (UID: \"1d68a6cf-5a1b-40bd-9502-a81d117ee6d2\") " pod="openshift-controller-manager/controller-manager-d49cf5dcf-6dfqg" Dec 13 06:33:12 crc kubenswrapper[5048]: I1213 06:33:12.606749 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1d68a6cf-5a1b-40bd-9502-a81d117ee6d2-serving-cert\") pod \"controller-manager-d49cf5dcf-6dfqg\" (UID: \"1d68a6cf-5a1b-40bd-9502-a81d117ee6d2\") " pod="openshift-controller-manager/controller-manager-d49cf5dcf-6dfqg" Dec 13 06:33:12 crc kubenswrapper[5048]: I1213 06:33:12.606807 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-flj2k\" (UniqueName: \"kubernetes.io/projected/1d68a6cf-5a1b-40bd-9502-a81d117ee6d2-kube-api-access-flj2k\") pod \"controller-manager-d49cf5dcf-6dfqg\" (UID: \"1d68a6cf-5a1b-40bd-9502-a81d117ee6d2\") " pod="openshift-controller-manager/controller-manager-d49cf5dcf-6dfqg" Dec 13 06:33:12 crc kubenswrapper[5048]: I1213 06:33:12.606874 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1d68a6cf-5a1b-40bd-9502-a81d117ee6d2-client-ca\") pod \"controller-manager-d49cf5dcf-6dfqg\" (UID: \"1d68a6cf-5a1b-40bd-9502-a81d117ee6d2\") " pod="openshift-controller-manager/controller-manager-d49cf5dcf-6dfqg" Dec 13 06:33:12 crc kubenswrapper[5048]: I1213 06:33:12.606933 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/1d68a6cf-5a1b-40bd-9502-a81d117ee6d2-config\") pod \"controller-manager-d49cf5dcf-6dfqg\" (UID: \"1d68a6cf-5a1b-40bd-9502-a81d117ee6d2\") " pod="openshift-controller-manager/controller-manager-d49cf5dcf-6dfqg" Dec 13 06:33:12 crc kubenswrapper[5048]: I1213 06:33:12.708678 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1d68a6cf-5a1b-40bd-9502-a81d117ee6d2-config\") pod \"controller-manager-d49cf5dcf-6dfqg\" (UID: \"1d68a6cf-5a1b-40bd-9502-a81d117ee6d2\") " pod="openshift-controller-manager/controller-manager-d49cf5dcf-6dfqg" Dec 13 06:33:12 crc kubenswrapper[5048]: I1213 06:33:12.708739 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1d68a6cf-5a1b-40bd-9502-a81d117ee6d2-proxy-ca-bundles\") pod \"controller-manager-d49cf5dcf-6dfqg\" (UID: \"1d68a6cf-5a1b-40bd-9502-a81d117ee6d2\") " pod="openshift-controller-manager/controller-manager-d49cf5dcf-6dfqg" Dec 13 06:33:12 crc kubenswrapper[5048]: I1213 06:33:12.708817 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1d68a6cf-5a1b-40bd-9502-a81d117ee6d2-serving-cert\") pod \"controller-manager-d49cf5dcf-6dfqg\" (UID: \"1d68a6cf-5a1b-40bd-9502-a81d117ee6d2\") " pod="openshift-controller-manager/controller-manager-d49cf5dcf-6dfqg" Dec 13 06:33:12 crc kubenswrapper[5048]: I1213 06:33:12.708838 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-flj2k\" (UniqueName: \"kubernetes.io/projected/1d68a6cf-5a1b-40bd-9502-a81d117ee6d2-kube-api-access-flj2k\") pod \"controller-manager-d49cf5dcf-6dfqg\" (UID: \"1d68a6cf-5a1b-40bd-9502-a81d117ee6d2\") " pod="openshift-controller-manager/controller-manager-d49cf5dcf-6dfqg" Dec 13 06:33:12 crc kubenswrapper[5048]: I1213 06:33:12.708868 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1d68a6cf-5a1b-40bd-9502-a81d117ee6d2-client-ca\") pod \"controller-manager-d49cf5dcf-6dfqg\" (UID: \"1d68a6cf-5a1b-40bd-9502-a81d117ee6d2\") " pod="openshift-controller-manager/controller-manager-d49cf5dcf-6dfqg" Dec 13 06:33:12 crc kubenswrapper[5048]: I1213 06:33:12.709929 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1d68a6cf-5a1b-40bd-9502-a81d117ee6d2-client-ca\") pod \"controller-manager-d49cf5dcf-6dfqg\" (UID: \"1d68a6cf-5a1b-40bd-9502-a81d117ee6d2\") " pod="openshift-controller-manager/controller-manager-d49cf5dcf-6dfqg" Dec 13 06:33:12 crc kubenswrapper[5048]: I1213 06:33:12.710041 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1d68a6cf-5a1b-40bd-9502-a81d117ee6d2-proxy-ca-bundles\") pod \"controller-manager-d49cf5dcf-6dfqg\" (UID: \"1d68a6cf-5a1b-40bd-9502-a81d117ee6d2\") " pod="openshift-controller-manager/controller-manager-d49cf5dcf-6dfqg" Dec 13 06:33:12 crc kubenswrapper[5048]: I1213 06:33:12.710388 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1d68a6cf-5a1b-40bd-9502-a81d117ee6d2-config\") pod \"controller-manager-d49cf5dcf-6dfqg\" (UID: \"1d68a6cf-5a1b-40bd-9502-a81d117ee6d2\") " pod="openshift-controller-manager/controller-manager-d49cf5dcf-6dfqg" Dec 13 06:33:12 crc 
Dec 13 06:33:12 crc kubenswrapper[5048]: I1213 06:33:12.714551 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1d68a6cf-5a1b-40bd-9502-a81d117ee6d2-serving-cert\") pod \"controller-manager-d49cf5dcf-6dfqg\" (UID: \"1d68a6cf-5a1b-40bd-9502-a81d117ee6d2\") " pod="openshift-controller-manager/controller-manager-d49cf5dcf-6dfqg"
Dec 13 06:33:12 crc kubenswrapper[5048]: I1213 06:33:12.731403 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-flj2k\" (UniqueName: \"kubernetes.io/projected/1d68a6cf-5a1b-40bd-9502-a81d117ee6d2-kube-api-access-flj2k\") pod \"controller-manager-d49cf5dcf-6dfqg\" (UID: \"1d68a6cf-5a1b-40bd-9502-a81d117ee6d2\") " pod="openshift-controller-manager/controller-manager-d49cf5dcf-6dfqg"
Dec 13 06:33:12 crc kubenswrapper[5048]: I1213 06:33:12.834955 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-d49cf5dcf-6dfqg"
Dec 13 06:33:13 crc kubenswrapper[5048]: I1213 06:33:13.048340 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-d49cf5dcf-6dfqg"]
Dec 13 06:33:13 crc kubenswrapper[5048]: W1213 06:33:13.062330 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1d68a6cf_5a1b_40bd_9502_a81d117ee6d2.slice/crio-aaa7906dc8697f01ae735eec3eb3b0f2240f37b754391ac41fb26eed30ec9798 WatchSource:0}: Error finding container aaa7906dc8697f01ae735eec3eb3b0f2240f37b754391ac41fb26eed30ec9798: Status 404 returned error can't find the container with id aaa7906dc8697f01ae735eec3eb3b0f2240f37b754391ac41fb26eed30ec9798
Dec 13 06:33:13 crc kubenswrapper[5048]: I1213 06:33:13.111968 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-tm62z" event={"ID":"226b24e2-92c6-43d1-a621-09702ffa8fd4","Type":"ContainerStarted","Data":"0ccbcf918cc8b947f13ed4a6da0c2b712d78526af12601495ecde783ec0c05ef"}
Dec 13 06:33:13 crc kubenswrapper[5048]: I1213 06:33:13.114712 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-99ff448dc-2ckpz" event={"ID":"da283285-f622-4286-be22-0c3e4d244cd1","Type":"ContainerStarted","Data":"783949f7baae5c962cd7a7398449fc30d62cceb3ac4168ac75ca3d891359bca8"}
Dec 13 06:33:13 crc kubenswrapper[5048]: I1213 06:33:13.115159 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-99ff448dc-2ckpz"
Dec 13 06:33:13 crc kubenswrapper[5048]: I1213 06:33:13.116486 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-d49cf5dcf-6dfqg" event={"ID":"1d68a6cf-5a1b-40bd-9502-a81d117ee6d2","Type":"ContainerStarted","Data":"aaa7906dc8697f01ae735eec3eb3b0f2240f37b754391ac41fb26eed30ec9798"}
Dec 13 06:33:13 crc kubenswrapper[5048]: I1213 06:33:13.120157 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"268d907d-1730-456e-8e52-67e58aca607b","Type":"ContainerStarted","Data":"c8f1f04a596aad23301a369c0d4f27bff3633c4555e8593f50e39f6cc5ac30b6"}
Dec 13 06:33:13 crc kubenswrapper[5048]: I1213 06:33:13.120229 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"268d907d-1730-456e-8e52-67e58aca607b","Type":"ContainerStarted","Data":"273cb1e4ab78e778a25e8c556d3664a544cce9de01598c9bd0215f8c39ce448b"}
Dec 13 06:33:13 crc kubenswrapper[5048]: I1213 06:33:13.121079 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-99ff448dc-2ckpz"
Dec 13 06:33:13 crc kubenswrapper[5048]: I1213 06:33:13.126044 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"63a58dec-0fcf-4425-820a-19f710e3d0d3","Type":"ContainerDied","Data":"5aa3c6e20a8808e180687b7e308b7b8782a7c0a07ee5b5a5ad1b898e06b7afea"}
Dec 13 06:33:13 crc kubenswrapper[5048]: I1213 06:33:13.126957 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-tm62z" podStartSLOduration=204.12693701 podStartE2EDuration="3m24.12693701s" podCreationTimestamp="2025-12-13 06:29:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:33:13.125622963 +0000 UTC m=+226.992217554" watchObservedRunningTime="2025-12-13 06:33:13.12693701 +0000 UTC m=+226.993531591"
Dec 13 06:33:13 crc kubenswrapper[5048]: I1213 06:33:13.125841 5048 generic.go:334] "Generic (PLEG): container finished" podID="63a58dec-0fcf-4425-820a-19f710e3d0d3" containerID="5aa3c6e20a8808e180687b7e308b7b8782a7c0a07ee5b5a5ad1b898e06b7afea" exitCode=0
Dec 13 06:33:13 crc kubenswrapper[5048]: I1213 06:33:13.131086 5048 patch_prober.go:28] interesting pod/downloads-7954f5f757-xclb7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" start-of-body=
Dec 13 06:33:13 crc kubenswrapper[5048]: I1213 06:33:13.131126 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-xclb7" podUID="f567d62b-7941-466c-84c8-06f6854000ba" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused"
Dec 13 06:33:13 crc kubenswrapper[5048]: I1213 06:33:13.143215 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-99ff448dc-2ckpz" podStartSLOduration=32.143195379 podStartE2EDuration="32.143195379s" podCreationTimestamp="2025-12-13 06:32:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:33:13.140719651 +0000 UTC m=+227.007314242" watchObservedRunningTime="2025-12-13 06:33:13.143195379 +0000 UTC m=+227.009789960"
Dec 13 06:33:13 crc kubenswrapper[5048]: I1213 06:33:13.186151 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=23.186131038 podStartE2EDuration="23.186131038s" podCreationTimestamp="2025-12-13 06:32:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:33:13.165218609 +0000 UTC m=+227.031813230" watchObservedRunningTime="2025-12-13 06:33:13.186131038 +0000 UTC m=+227.052725619"
pod="openshift-controller-manager/controller-manager-d49cf5dcf-6dfqg" event={"ID":"1d68a6cf-5a1b-40bd-9502-a81d117ee6d2","Type":"ContainerStarted","Data":"cd6bb8f71ef92393849a250d9abd020f5b6eedc4d1f76504f64226265aabf72e"} Dec 13 06:33:14 crc kubenswrapper[5048]: I1213 06:33:14.140877 5048 patch_prober.go:28] interesting pod/downloads-7954f5f757-xclb7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" start-of-body= Dec 13 06:33:14 crc kubenswrapper[5048]: I1213 06:33:14.140918 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-xclb7" podUID="f567d62b-7941-466c-84c8-06f6854000ba" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" Dec 13 06:33:14 crc kubenswrapper[5048]: I1213 06:33:14.162610 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-d49cf5dcf-6dfqg" podStartSLOduration=33.162591649 podStartE2EDuration="33.162591649s" podCreationTimestamp="2025-12-13 06:32:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:33:14.159490003 +0000 UTC m=+228.026084604" watchObservedRunningTime="2025-12-13 06:33:14.162591649 +0000 UTC m=+228.029186230" Dec 13 06:33:14 crc kubenswrapper[5048]: I1213 06:33:14.441300 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 13 06:33:14 crc kubenswrapper[5048]: I1213 06:33:14.568819 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/63a58dec-0fcf-4425-820a-19f710e3d0d3-kubelet-dir\") pod \"63a58dec-0fcf-4425-820a-19f710e3d0d3\" (UID: \"63a58dec-0fcf-4425-820a-19f710e3d0d3\") " Dec 13 06:33:14 crc kubenswrapper[5048]: I1213 06:33:14.568920 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/63a58dec-0fcf-4425-820a-19f710e3d0d3-kube-api-access\") pod \"63a58dec-0fcf-4425-820a-19f710e3d0d3\" (UID: \"63a58dec-0fcf-4425-820a-19f710e3d0d3\") " Dec 13 06:33:14 crc kubenswrapper[5048]: I1213 06:33:14.569002 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/63a58dec-0fcf-4425-820a-19f710e3d0d3-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "63a58dec-0fcf-4425-820a-19f710e3d0d3" (UID: "63a58dec-0fcf-4425-820a-19f710e3d0d3"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 13 06:33:14 crc kubenswrapper[5048]: I1213 06:33:14.570212 5048 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/63a58dec-0fcf-4425-820a-19f710e3d0d3-kubelet-dir\") on node \"crc\" DevicePath \"\"" Dec 13 06:33:14 crc kubenswrapper[5048]: I1213 06:33:14.578253 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63a58dec-0fcf-4425-820a-19f710e3d0d3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "63a58dec-0fcf-4425-820a-19f710e3d0d3" (UID: "63a58dec-0fcf-4425-820a-19f710e3d0d3"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:33:14 crc kubenswrapper[5048]: I1213 06:33:14.671731 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/63a58dec-0fcf-4425-820a-19f710e3d0d3-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 13 06:33:15 crc kubenswrapper[5048]: I1213 06:33:15.308111 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"63a58dec-0fcf-4425-820a-19f710e3d0d3","Type":"ContainerDied","Data":"5678a6a06e9ad408e418d54d2136cffb741fba7f96effd4b06123e080d649c6d"} Dec 13 06:33:15 crc kubenswrapper[5048]: I1213 06:33:15.308249 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5678a6a06e9ad408e418d54d2136cffb741fba7f96effd4b06123e080d649c6d" Dec 13 06:33:15 crc kubenswrapper[5048]: I1213 06:33:15.308411 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 13 06:33:15 crc kubenswrapper[5048]: I1213 06:33:15.308970 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-d49cf5dcf-6dfqg" Dec 13 06:33:15 crc kubenswrapper[5048]: I1213 06:33:15.313735 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-d49cf5dcf-6dfqg" Dec 13 06:33:16 crc kubenswrapper[5048]: I1213 06:33:16.216461 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 13 06:33:16 crc kubenswrapper[5048]: I1213 06:33:16.216543 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 13 06:33:16 crc kubenswrapper[5048]: I1213 06:33:16.216602 5048 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" Dec 13 06:33:16 crc kubenswrapper[5048]: I1213 06:33:16.217376 5048 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95"} pod="openshift-machine-config-operator/machine-config-daemon-j7hns" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 13 06:33:16 crc kubenswrapper[5048]: I1213 06:33:16.217469 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" containerID="cri-o://a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95" gracePeriod=600 Dec 13 06:33:18 crc kubenswrapper[5048]: I1213 06:33:18.327609 5048 generic.go:334] "Generic (PLEG): container finished" podID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerID="a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95" exitCode=0 Dec 13 06:33:18 crc kubenswrapper[5048]: I1213 
Dec 13 06:33:18 crc kubenswrapper[5048]: I1213 06:33:18.327702 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" event={"ID":"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b","Type":"ContainerDied","Data":"a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95"}
Dec 13 06:33:20 crc kubenswrapper[5048]: I1213 06:33:20.350484 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" event={"ID":"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b","Type":"ContainerStarted","Data":"b5193378b65ca2050a83545e0498527657cf2baaa1e10184e3d174b0ace1867e"}
Dec 13 06:33:20 crc kubenswrapper[5048]: I1213 06:33:20.351821 5048 patch_prober.go:28] interesting pod/downloads-7954f5f757-xclb7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" start-of-body=
Dec 13 06:33:20 crc kubenswrapper[5048]: I1213 06:33:20.351875 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-xclb7" podUID="f567d62b-7941-466c-84c8-06f6854000ba" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused"
Dec 13 06:33:20 crc kubenswrapper[5048]: I1213 06:33:20.352396 5048 patch_prober.go:28] interesting pod/downloads-7954f5f757-xclb7 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused" start-of-body=
Dec 13 06:33:20 crc kubenswrapper[5048]: I1213 06:33:20.352425 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-xclb7" podUID="f567d62b-7941-466c-84c8-06f6854000ba" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.16:8080/\": dial tcp 10.217.0.16:8080: connect: connection refused"
Dec 13 06:33:24 crc kubenswrapper[5048]: I1213 06:33:24.385835 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dmqg7" event={"ID":"16d5a4f5-03a1-46f1-990b-f69dc88f1e9d","Type":"ContainerStarted","Data":"6b0387b7aaed67a4ce5b15785056b60089732db6331d2d15cc45f189d4b7500e"}
Dec 13 06:33:25 crc kubenswrapper[5048]: I1213 06:33:25.397959 5048 generic.go:334] "Generic (PLEG): container finished" podID="16d5a4f5-03a1-46f1-990b-f69dc88f1e9d" containerID="6b0387b7aaed67a4ce5b15785056b60089732db6331d2d15cc45f189d4b7500e" exitCode=0
Dec 13 06:33:25 crc kubenswrapper[5048]: I1213 06:33:25.398083 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dmqg7" event={"ID":"16d5a4f5-03a1-46f1-990b-f69dc88f1e9d","Type":"ContainerDied","Data":"6b0387b7aaed67a4ce5b15785056b60089732db6331d2d15cc45f189d4b7500e"}
Dec 13 06:33:25 crc kubenswrapper[5048]: I1213 06:33:25.402132 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k9jhn" event={"ID":"87790266-395a-4687-895f-f524c8b2171b","Type":"ContainerStarted","Data":"5310fa235bb5de9affb9933ae0cef9998b36a58dd6f9ecf3ccdf00d66dde1087"}
Dec 13 06:33:28 crc kubenswrapper[5048]: I1213 06:33:28.425409 5048 generic.go:334] "Generic (PLEG): container finished" podID="87790266-395a-4687-895f-f524c8b2171b" containerID="5310fa235bb5de9affb9933ae0cef9998b36a58dd6f9ecf3ccdf00d66dde1087" exitCode=0
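[Annotation, not part of the log: the marketplace entries above show catalog pods' "extract-content" init containers starting, finishing with exitCode=0, and only then letting the main registry container start. A hedged client-go sketch, assuming a kubeconfig at ~/.kube/config, that polls one of these pods for that transition; the pod name comes from the log, the polling interval is illustrative:]

package main

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	kubeconfig := filepath.Join(os.Getenv("HOME"), ".kube", "config")
	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	for {
		pod, err := clientset.CoreV1().Pods("openshift-marketplace").Get(
			context.TODO(), "redhat-operators-k9jhn", metav1.GetOptions{})
		if err != nil {
			panic(err)
		}
		done := true
		for _, cs := range pod.Status.InitContainerStatuses {
			// An init container counts as done only once it has terminated with exit code 0,
			// matching the "container finished ... exitCode=0" entries in this log.
			if t := cs.State.Terminated; t == nil || t.ExitCode != 0 {
				done = false
				fmt.Printf("init %s not finished yet\n", cs.Name)
			}
		}
		if done {
			fmt.Println("all init containers exited 0; main containers can start")
			return
		}
		time.Sleep(2 * time.Second)
	}
}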
Dec 13 06:33:28 crc kubenswrapper[5048]: I1213 06:33:28.425498 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k9jhn" event={"ID":"87790266-395a-4687-895f-f524c8b2171b","Type":"ContainerDied","Data":"5310fa235bb5de9affb9933ae0cef9998b36a58dd6f9ecf3ccdf00d66dde1087"}
Dec 13 06:33:30 crc kubenswrapper[5048]: I1213 06:33:30.366087 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-xclb7"
Dec 13 06:33:31 crc kubenswrapper[5048]: I1213 06:33:31.445394 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vfwpt" event={"ID":"07531c82-87d1-409f-9c5a-4910633b5786","Type":"ContainerStarted","Data":"88945c341701d4be8dfd08598b74c69f11666d3afa4c7f19e8a0b7734878a207"}
Dec 13 06:33:32 crc kubenswrapper[5048]: I1213 06:33:32.450697 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l9l5s" event={"ID":"6885bcbf-dd86-4e5a-bd21-92395c5ae676","Type":"ContainerStarted","Data":"d06f0e5a8ec6c60d1a528bfcc23ddb2a1e0ef241d70b480e2500674a86d88b89"}
Dec 13 06:33:32 crc kubenswrapper[5048]: I1213 06:33:32.453011 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lqvmp" event={"ID":"57717114-1abd-46bf-bdbd-0a785d734cd3","Type":"ContainerStarted","Data":"dfd858171332a3f68676b6da6792a1c9ce21c9c0130e32d8ce97a3ca6220dbd9"}
Dec 13 06:33:32 crc kubenswrapper[5048]: I1213 06:33:32.455711 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hjls5" event={"ID":"1ea76a75-e8de-4a91-89af-726df36e8a21","Type":"ContainerStarted","Data":"37e5ab0ae5d487c0c01e40129b68fcc8e0a8b2d9eb0b1242919febc65258425d"}
Dec 13 06:33:32 crc kubenswrapper[5048]: I1213 06:33:32.458477 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q8ltl" event={"ID":"f7b93b87-31d6-4279-8ac9-b834417f66d9","Type":"ContainerStarted","Data":"1fd54bc6bd95d53fe9009107043714905f8aa7fc103915bea961493ba9c4e603"}
Dec 13 06:33:33 crc kubenswrapper[5048]: I1213 06:33:33.363907 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-wcscw"]
Dec 13 06:33:33 crc kubenswrapper[5048]: E1213 06:33:33.364137 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63a58dec-0fcf-4425-820a-19f710e3d0d3" containerName="pruner"
Dec 13 06:33:33 crc kubenswrapper[5048]: I1213 06:33:33.364151 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="63a58dec-0fcf-4425-820a-19f710e3d0d3" containerName="pruner"
Dec 13 06:33:33 crc kubenswrapper[5048]: I1213 06:33:33.364283 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="63a58dec-0fcf-4425-820a-19f710e3d0d3" containerName="pruner"
Dec 13 06:33:33 crc kubenswrapper[5048]: I1213 06:33:33.364779 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-wcscw"
Dec 13 06:33:33 crc kubenswrapper[5048]: I1213 06:33:33.385605 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-wcscw"]
Dec 13 06:33:33 crc kubenswrapper[5048]: I1213 06:33:33.487323 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/adb2a7ef-7786-408c-9e37-2b577af9b3ac-trusted-ca\") pod \"image-registry-66df7c8f76-wcscw\" (UID: \"adb2a7ef-7786-408c-9e37-2b577af9b3ac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wcscw"
Dec 13 06:33:33 crc kubenswrapper[5048]: I1213 06:33:33.487499 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/adb2a7ef-7786-408c-9e37-2b577af9b3ac-registry-certificates\") pod \"image-registry-66df7c8f76-wcscw\" (UID: \"adb2a7ef-7786-408c-9e37-2b577af9b3ac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wcscw"
Dec 13 06:33:33 crc kubenswrapper[5048]: I1213 06:33:33.487586 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b2hhq\" (UniqueName: \"kubernetes.io/projected/adb2a7ef-7786-408c-9e37-2b577af9b3ac-kube-api-access-b2hhq\") pod \"image-registry-66df7c8f76-wcscw\" (UID: \"adb2a7ef-7786-408c-9e37-2b577af9b3ac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wcscw"
Dec 13 06:33:33 crc kubenswrapper[5048]: I1213 06:33:33.487812 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/adb2a7ef-7786-408c-9e37-2b577af9b3ac-registry-tls\") pod \"image-registry-66df7c8f76-wcscw\" (UID: \"adb2a7ef-7786-408c-9e37-2b577af9b3ac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wcscw"
Dec 13 06:33:33 crc kubenswrapper[5048]: I1213 06:33:33.487880 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-wcscw\" (UID: \"adb2a7ef-7786-408c-9e37-2b577af9b3ac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wcscw"
Dec 13 06:33:33 crc kubenswrapper[5048]: I1213 06:33:33.487910 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/adb2a7ef-7786-408c-9e37-2b577af9b3ac-bound-sa-token\") pod \"image-registry-66df7c8f76-wcscw\" (UID: \"adb2a7ef-7786-408c-9e37-2b577af9b3ac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wcscw"
Dec 13 06:33:33 crc kubenswrapper[5048]: I1213 06:33:33.487945 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/adb2a7ef-7786-408c-9e37-2b577af9b3ac-ca-trust-extracted\") pod \"image-registry-66df7c8f76-wcscw\" (UID: \"adb2a7ef-7786-408c-9e37-2b577af9b3ac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wcscw"
Dec 13 06:33:33 crc kubenswrapper[5048]: I1213 06:33:33.487993 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/adb2a7ef-7786-408c-9e37-2b577af9b3ac-installation-pull-secrets\") pod \"image-registry-66df7c8f76-wcscw\" (UID: \"adb2a7ef-7786-408c-9e37-2b577af9b3ac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wcscw"
Dec 13 06:33:33 crc kubenswrapper[5048]: I1213 06:33:33.515496 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-wcscw\" (UID: \"adb2a7ef-7786-408c-9e37-2b577af9b3ac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wcscw"
Dec 13 06:33:33 crc kubenswrapper[5048]: I1213 06:33:33.588889 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/adb2a7ef-7786-408c-9e37-2b577af9b3ac-ca-trust-extracted\") pod \"image-registry-66df7c8f76-wcscw\" (UID: \"adb2a7ef-7786-408c-9e37-2b577af9b3ac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wcscw"
Dec 13 06:33:33 crc kubenswrapper[5048]: I1213 06:33:33.588938 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/adb2a7ef-7786-408c-9e37-2b577af9b3ac-installation-pull-secrets\") pod \"image-registry-66df7c8f76-wcscw\" (UID: \"adb2a7ef-7786-408c-9e37-2b577af9b3ac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wcscw"
Dec 13 06:33:33 crc kubenswrapper[5048]: I1213 06:33:33.588967 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/adb2a7ef-7786-408c-9e37-2b577af9b3ac-trusted-ca\") pod \"image-registry-66df7c8f76-wcscw\" (UID: \"adb2a7ef-7786-408c-9e37-2b577af9b3ac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wcscw"
Dec 13 06:33:33 crc kubenswrapper[5048]: I1213 06:33:33.588996 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/adb2a7ef-7786-408c-9e37-2b577af9b3ac-registry-certificates\") pod \"image-registry-66df7c8f76-wcscw\" (UID: \"adb2a7ef-7786-408c-9e37-2b577af9b3ac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wcscw"
Dec 13 06:33:33 crc kubenswrapper[5048]: I1213 06:33:33.589014 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b2hhq\" (UniqueName: \"kubernetes.io/projected/adb2a7ef-7786-408c-9e37-2b577af9b3ac-kube-api-access-b2hhq\") pod \"image-registry-66df7c8f76-wcscw\" (UID: \"adb2a7ef-7786-408c-9e37-2b577af9b3ac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wcscw"
Dec 13 06:33:33 crc kubenswrapper[5048]: I1213 06:33:33.589066 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/adb2a7ef-7786-408c-9e37-2b577af9b3ac-registry-tls\") pod \"image-registry-66df7c8f76-wcscw\" (UID: \"adb2a7ef-7786-408c-9e37-2b577af9b3ac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wcscw"
Dec 13 06:33:33 crc kubenswrapper[5048]: I1213 06:33:33.589088 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/adb2a7ef-7786-408c-9e37-2b577af9b3ac-bound-sa-token\") pod \"image-registry-66df7c8f76-wcscw\" (UID: \"adb2a7ef-7786-408c-9e37-2b577af9b3ac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wcscw"
Dec 13 06:33:33 crc kubenswrapper[5048]: I1213 06:33:33.590910 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/adb2a7ef-7786-408c-9e37-2b577af9b3ac-registry-certificates\") pod \"image-registry-66df7c8f76-wcscw\" (UID: \"adb2a7ef-7786-408c-9e37-2b577af9b3ac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wcscw"
Dec 13 06:33:33 crc kubenswrapper[5048]: I1213 06:33:33.593727 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/adb2a7ef-7786-408c-9e37-2b577af9b3ac-ca-trust-extracted\") pod \"image-registry-66df7c8f76-wcscw\" (UID: \"adb2a7ef-7786-408c-9e37-2b577af9b3ac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wcscw"
Dec 13 06:33:33 crc kubenswrapper[5048]: I1213 06:33:33.611323 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/adb2a7ef-7786-408c-9e37-2b577af9b3ac-installation-pull-secrets\") pod \"image-registry-66df7c8f76-wcscw\" (UID: \"adb2a7ef-7786-408c-9e37-2b577af9b3ac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wcscw"
Dec 13 06:33:33 crc kubenswrapper[5048]: I1213 06:33:33.620300 5048 generic.go:334] "Generic (PLEG): container finished" podID="1ea76a75-e8de-4a91-89af-726df36e8a21" containerID="37e5ab0ae5d487c0c01e40129b68fcc8e0a8b2d9eb0b1242919febc65258425d" exitCode=0
Dec 13 06:33:33 crc kubenswrapper[5048]: I1213 06:33:33.620373 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hjls5" event={"ID":"1ea76a75-e8de-4a91-89af-726df36e8a21","Type":"ContainerDied","Data":"37e5ab0ae5d487c0c01e40129b68fcc8e0a8b2d9eb0b1242919febc65258425d"}
Dec 13 06:33:33 crc kubenswrapper[5048]: I1213 06:33:33.621064 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/adb2a7ef-7786-408c-9e37-2b577af9b3ac-bound-sa-token\") pod \"image-registry-66df7c8f76-wcscw\" (UID: \"adb2a7ef-7786-408c-9e37-2b577af9b3ac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wcscw"
Dec 13 06:33:33 crc kubenswrapper[5048]: I1213 06:33:33.621694 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/adb2a7ef-7786-408c-9e37-2b577af9b3ac-registry-tls\") pod \"image-registry-66df7c8f76-wcscw\" (UID: \"adb2a7ef-7786-408c-9e37-2b577af9b3ac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wcscw"
Dec 13 06:33:33 crc kubenswrapper[5048]: I1213 06:33:33.626631 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/adb2a7ef-7786-408c-9e37-2b577af9b3ac-trusted-ca\") pod \"image-registry-66df7c8f76-wcscw\" (UID: \"adb2a7ef-7786-408c-9e37-2b577af9b3ac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wcscw"
Dec 13 06:33:33 crc kubenswrapper[5048]: I1213 06:33:33.629267 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b2hhq\" (UniqueName: \"kubernetes.io/projected/adb2a7ef-7786-408c-9e37-2b577af9b3ac-kube-api-access-b2hhq\") pod \"image-registry-66df7c8f76-wcscw\" (UID: \"adb2a7ef-7786-408c-9e37-2b577af9b3ac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wcscw"
Dec 13 06:33:33 crc kubenswrapper[5048]: I1213 06:33:33.632211 5048 generic.go:334] "Generic (PLEG): container finished" podID="07531c82-87d1-409f-9c5a-4910633b5786" containerID="88945c341701d4be8dfd08598b74c69f11666d3afa4c7f19e8a0b7734878a207" exitCode=0
Dec 13 06:33:33 crc kubenswrapper[5048]: I1213 06:33:33.632256 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vfwpt" event={"ID":"07531c82-87d1-409f-9c5a-4910633b5786","Type":"ContainerDied","Data":"88945c341701d4be8dfd08598b74c69f11666d3afa4c7f19e8a0b7734878a207"}
Dec 13 06:33:33 crc kubenswrapper[5048]: I1213 06:33:33.678205 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-wcscw"
Dec 13 06:33:34 crc kubenswrapper[5048]: I1213 06:33:34.638780 5048 generic.go:334] "Generic (PLEG): container finished" podID="57717114-1abd-46bf-bdbd-0a785d734cd3" containerID="dfd858171332a3f68676b6da6792a1c9ce21c9c0130e32d8ce97a3ca6220dbd9" exitCode=0
Dec 13 06:33:34 crc kubenswrapper[5048]: I1213 06:33:34.638884 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lqvmp" event={"ID":"57717114-1abd-46bf-bdbd-0a785d734cd3","Type":"ContainerDied","Data":"dfd858171332a3f68676b6da6792a1c9ce21c9c0130e32d8ce97a3ca6220dbd9"}
Dec 13 06:33:34 crc kubenswrapper[5048]: I1213 06:33:34.640933 5048 generic.go:334] "Generic (PLEG): container finished" podID="f7b93b87-31d6-4279-8ac9-b834417f66d9" containerID="1fd54bc6bd95d53fe9009107043714905f8aa7fc103915bea961493ba9c4e603" exitCode=0
Dec 13 06:33:34 crc kubenswrapper[5048]: I1213 06:33:34.640979 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q8ltl" event={"ID":"f7b93b87-31d6-4279-8ac9-b834417f66d9","Type":"ContainerDied","Data":"1fd54bc6bd95d53fe9009107043714905f8aa7fc103915bea961493ba9c4e603"}
Dec 13 06:33:38 crc kubenswrapper[5048]: I1213 06:33:38.667055 5048 generic.go:334] "Generic (PLEG): container finished" podID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" containerID="d06f0e5a8ec6c60d1a528bfcc23ddb2a1e0ef241d70b480e2500674a86d88b89" exitCode=0
Dec 13 06:33:38 crc kubenswrapper[5048]: I1213 06:33:38.667146 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l9l5s" event={"ID":"6885bcbf-dd86-4e5a-bd21-92395c5ae676","Type":"ContainerDied","Data":"d06f0e5a8ec6c60d1a528bfcc23ddb2a1e0ef241d70b480e2500674a86d88b89"}
Dec 13 06:33:39 crc kubenswrapper[5048]: I1213 06:33:39.412422 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-wcscw"]
Dec 13 06:33:39 crc kubenswrapper[5048]: W1213 06:33:39.429280 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podadb2a7ef_7786_408c_9e37_2b577af9b3ac.slice/crio-92173a8ef364cb51b9e1a57e5238c93f5a81bf44538ac9cdfcb54ff4186b725a WatchSource:0}: Error finding container 92173a8ef364cb51b9e1a57e5238c93f5a81bf44538ac9cdfcb54ff4186b725a: Status 404 returned error can't find the container with id 92173a8ef364cb51b9e1a57e5238c93f5a81bf44538ac9cdfcb54ff4186b725a
Dec 13 06:33:39 crc kubenswrapper[5048]: I1213 06:33:39.677253 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-wcscw" event={"ID":"adb2a7ef-7786-408c-9e37-2b577af9b3ac","Type":"ContainerStarted","Data":"92173a8ef364cb51b9e1a57e5238c93f5a81bf44538ac9cdfcb54ff4186b725a"}
Dec 13 06:33:39 crc kubenswrapper[5048]: I1213 06:33:39.679781 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k9jhn" event={"ID":"87790266-395a-4687-895f-f524c8b2171b","Type":"ContainerStarted","Data":"7ef4c810674b98a5a6f1511a55a423ec25e57323b26b6420cc673e440efd49dd"}
Dec 13 06:33:39 crc kubenswrapper[5048]: I1213 06:33:39.701370 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-k9jhn" podStartSLOduration=9.325875534 podStartE2EDuration="1m35.701350322s" podCreationTimestamp="2025-12-13 06:32:04 +0000 UTC" firstStartedPulling="2025-12-13 06:32:06.918804978 +0000 UTC m=+160.785399559" lastFinishedPulling="2025-12-13 06:33:33.294279766 +0000 UTC m=+247.160874347" observedRunningTime="2025-12-13 06:33:39.69801263 +0000 UTC m=+253.564607231" watchObservedRunningTime="2025-12-13 06:33:39.701350322 +0000 UTC m=+253.567944903"
Dec 13 06:33:40 crc kubenswrapper[5048]: I1213 06:33:40.689145 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-wcscw" event={"ID":"adb2a7ef-7786-408c-9e37-2b577af9b3ac","Type":"ContainerStarted","Data":"7e9d24e617cd784919621e4431d6893f7cc95ae88b066ca6843b8c5a0586e742"}
Dec 13 06:33:40 crc kubenswrapper[5048]: I1213 06:33:40.693574 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dmqg7" event={"ID":"16d5a4f5-03a1-46f1-990b-f69dc88f1e9d","Type":"ContainerStarted","Data":"a3d29aeb099a26394bc87634e55189778816c9653e97ff7352f2a9b8eb034bc6"}
Dec 13 06:33:40 crc kubenswrapper[5048]: I1213 06:33:40.844155 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-dwk4m"]
Dec 13 06:33:41 crc kubenswrapper[5048]: I1213 06:33:41.113153 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-d49cf5dcf-6dfqg"]
Dec 13 06:33:41 crc kubenswrapper[5048]: I1213 06:33:41.114028 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-d49cf5dcf-6dfqg" podUID="1d68a6cf-5a1b-40bd-9502-a81d117ee6d2" containerName="controller-manager" containerID="cri-o://cd6bb8f71ef92393849a250d9abd020f5b6eedc4d1f76504f64226265aabf72e" gracePeriod=30
Dec 13 06:33:41 crc kubenswrapper[5048]: I1213 06:33:41.227612 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-99ff448dc-2ckpz"]
Dec 13 06:33:41 crc kubenswrapper[5048]: I1213 06:33:41.228592 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-99ff448dc-2ckpz" podUID="da283285-f622-4286-be22-0c3e4d244cd1" containerName="route-controller-manager" containerID="cri-o://783949f7baae5c962cd7a7398449fc30d62cceb3ac4168ac75ca3d891359bca8" gracePeriod=30
Dec 13 06:33:41 crc kubenswrapper[5048]: I1213 06:33:41.705179 5048 generic.go:334] "Generic (PLEG): container finished" podID="1d68a6cf-5a1b-40bd-9502-a81d117ee6d2" containerID="cd6bb8f71ef92393849a250d9abd020f5b6eedc4d1f76504f64226265aabf72e" exitCode=0
Dec 13 06:33:41 crc kubenswrapper[5048]: I1213 06:33:41.705292 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-d49cf5dcf-6dfqg" event={"ID":"1d68a6cf-5a1b-40bd-9502-a81d117ee6d2","Type":"ContainerDied","Data":"cd6bb8f71ef92393849a250d9abd020f5b6eedc4d1f76504f64226265aabf72e"}
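[Annotation, not part of the log: the sequence above — SyncLoop DELETE from the API, "Killing container with a grace period" with gracePeriod=30, then the ContainerDied PLEG event — is the normal graceful-termination path. A minimal client-go sketch, assuming a kubeconfig at ~/.kube/config, issuing the same kind of deletion; the grace period and pod name are taken from the log, everything else is illustrative:]

package main

import (
	"context"
	"fmt"
	"os"
	"path/filepath"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	kubeconfig := filepath.Join(os.Getenv("HOME"), ".kube", "config")
	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	grace := int64(30) // matches gracePeriod=30 in the entries above
	err = clientset.CoreV1().Pods("openshift-controller-manager").Delete(
		context.TODO(),
		"controller-manager-d49cf5dcf-6dfqg", // pod name from the log
		metav1.DeleteOptions{GracePeriodSeconds: &grace},
	)
	if err != nil {
		panic(err)
	}
	// The kubelet then sends SIGTERM and escalates to SIGKILL once the grace period expires.
	fmt.Println("graceful delete issued")
}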
kubenswrapper[5048]: I1213 06:33:41.712830 5048 generic.go:334] "Generic (PLEG): container finished" podID="da283285-f622-4286-be22-0c3e4d244cd1" containerID="783949f7baae5c962cd7a7398449fc30d62cceb3ac4168ac75ca3d891359bca8" exitCode=0 Dec 13 06:33:41 crc kubenswrapper[5048]: I1213 06:33:41.712951 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-99ff448dc-2ckpz" event={"ID":"da283285-f622-4286-be22-0c3e4d244cd1","Type":"ContainerDied","Data":"783949f7baae5c962cd7a7398449fc30d62cceb3ac4168ac75ca3d891359bca8"} Dec 13 06:33:41 crc kubenswrapper[5048]: I1213 06:33:41.714217 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-wcscw" Dec 13 06:33:41 crc kubenswrapper[5048]: I1213 06:33:41.738550 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-dmqg7" podStartSLOduration=6.247119001 podStartE2EDuration="1m38.738525436s" podCreationTimestamp="2025-12-13 06:32:03 +0000 UTC" firstStartedPulling="2025-12-13 06:32:06.979822127 +0000 UTC m=+160.846416708" lastFinishedPulling="2025-12-13 06:33:39.471228562 +0000 UTC m=+253.337823143" observedRunningTime="2025-12-13 06:33:41.735649087 +0000 UTC m=+255.602243688" watchObservedRunningTime="2025-12-13 06:33:41.738525436 +0000 UTC m=+255.605120017" Dec 13 06:33:41 crc kubenswrapper[5048]: I1213 06:33:41.763299 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-wcscw" podStartSLOduration=8.763278002 podStartE2EDuration="8.763278002s" podCreationTimestamp="2025-12-13 06:33:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:33:41.760577917 +0000 UTC m=+255.627172528" watchObservedRunningTime="2025-12-13 06:33:41.763278002 +0000 UTC m=+255.629872583" Dec 13 06:33:42 crc kubenswrapper[5048]: I1213 06:33:42.836969 5048 patch_prober.go:28] interesting pod/controller-manager-d49cf5dcf-6dfqg container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.57:8443/healthz\": dial tcp 10.217.0.57:8443: connect: connection refused" start-of-body= Dec 13 06:33:42 crc kubenswrapper[5048]: I1213 06:33:42.837073 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-d49cf5dcf-6dfqg" podUID="1d68a6cf-5a1b-40bd-9502-a81d117ee6d2" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.57:8443/healthz\": dial tcp 10.217.0.57:8443: connect: connection refused" Dec 13 06:33:43 crc kubenswrapper[5048]: I1213 06:33:43.588892 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-dmqg7" Dec 13 06:33:43 crc kubenswrapper[5048]: I1213 06:33:43.590886 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-dmqg7" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.021161 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-99ff448dc-2ckpz" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.059947 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d"] Dec 13 06:33:44 crc kubenswrapper[5048]: E1213 06:33:44.060342 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da283285-f622-4286-be22-0c3e4d244cd1" containerName="route-controller-manager" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.060360 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="da283285-f622-4286-be22-0c3e4d244cd1" containerName="route-controller-manager" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.060493 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="da283285-f622-4286-be22-0c3e4d244cd1" containerName="route-controller-manager" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.061054 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.093377 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-d49cf5dcf-6dfqg" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.128172 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d"] Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.172127 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/da283285-f622-4286-be22-0c3e4d244cd1-serving-cert\") pod \"da283285-f622-4286-be22-0c3e4d244cd1\" (UID: \"da283285-f622-4286-be22-0c3e4d244cd1\") " Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.172207 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-flj2k\" (UniqueName: \"kubernetes.io/projected/1d68a6cf-5a1b-40bd-9502-a81d117ee6d2-kube-api-access-flj2k\") pod \"1d68a6cf-5a1b-40bd-9502-a81d117ee6d2\" (UID: \"1d68a6cf-5a1b-40bd-9502-a81d117ee6d2\") " Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.172254 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1d68a6cf-5a1b-40bd-9502-a81d117ee6d2-config\") pod \"1d68a6cf-5a1b-40bd-9502-a81d117ee6d2\" (UID: \"1d68a6cf-5a1b-40bd-9502-a81d117ee6d2\") " Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.172322 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1d68a6cf-5a1b-40bd-9502-a81d117ee6d2-serving-cert\") pod \"1d68a6cf-5a1b-40bd-9502-a81d117ee6d2\" (UID: \"1d68a6cf-5a1b-40bd-9502-a81d117ee6d2\") " Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.172371 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1d68a6cf-5a1b-40bd-9502-a81d117ee6d2-client-ca\") pod \"1d68a6cf-5a1b-40bd-9502-a81d117ee6d2\" (UID: \"1d68a6cf-5a1b-40bd-9502-a81d117ee6d2\") " Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.172471 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: 
\"kubernetes.io/configmap/1d68a6cf-5a1b-40bd-9502-a81d117ee6d2-proxy-ca-bundles\") pod \"1d68a6cf-5a1b-40bd-9502-a81d117ee6d2\" (UID: \"1d68a6cf-5a1b-40bd-9502-a81d117ee6d2\") " Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.172565 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mbkfl\" (UniqueName: \"kubernetes.io/projected/da283285-f622-4286-be22-0c3e4d244cd1-kube-api-access-mbkfl\") pod \"da283285-f622-4286-be22-0c3e4d244cd1\" (UID: \"da283285-f622-4286-be22-0c3e4d244cd1\") " Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.172606 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/da283285-f622-4286-be22-0c3e4d244cd1-client-ca\") pod \"da283285-f622-4286-be22-0c3e4d244cd1\" (UID: \"da283285-f622-4286-be22-0c3e4d244cd1\") " Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.172628 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da283285-f622-4286-be22-0c3e4d244cd1-config\") pod \"da283285-f622-4286-be22-0c3e4d244cd1\" (UID: \"da283285-f622-4286-be22-0c3e4d244cd1\") " Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.172849 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8c1ef8c9-54d7-4182-bd65-cdbf97bb6284-serving-cert\") pod \"route-controller-manager-74dc84cd6c-9769d\" (UID: \"8c1ef8c9-54d7-4182-bd65-cdbf97bb6284\") " pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.172962 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-snkhn\" (UniqueName: \"kubernetes.io/projected/8c1ef8c9-54d7-4182-bd65-cdbf97bb6284-kube-api-access-snkhn\") pod \"route-controller-manager-74dc84cd6c-9769d\" (UID: \"8c1ef8c9-54d7-4182-bd65-cdbf97bb6284\") " pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.172994 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8c1ef8c9-54d7-4182-bd65-cdbf97bb6284-config\") pod \"route-controller-manager-74dc84cd6c-9769d\" (UID: \"8c1ef8c9-54d7-4182-bd65-cdbf97bb6284\") " pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.173015 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8c1ef8c9-54d7-4182-bd65-cdbf97bb6284-client-ca\") pod \"route-controller-manager-74dc84cd6c-9769d\" (UID: \"8c1ef8c9-54d7-4182-bd65-cdbf97bb6284\") " pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.173555 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1d68a6cf-5a1b-40bd-9502-a81d117ee6d2-client-ca" (OuterVolumeSpecName: "client-ca") pod "1d68a6cf-5a1b-40bd-9502-a81d117ee6d2" (UID: "1d68a6cf-5a1b-40bd-9502-a81d117ee6d2"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.174183 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/da283285-f622-4286-be22-0c3e4d244cd1-config" (OuterVolumeSpecName: "config") pod "da283285-f622-4286-be22-0c3e4d244cd1" (UID: "da283285-f622-4286-be22-0c3e4d244cd1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.174223 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/da283285-f622-4286-be22-0c3e4d244cd1-client-ca" (OuterVolumeSpecName: "client-ca") pod "da283285-f622-4286-be22-0c3e4d244cd1" (UID: "da283285-f622-4286-be22-0c3e4d244cd1"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.174262 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1d68a6cf-5a1b-40bd-9502-a81d117ee6d2-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "1d68a6cf-5a1b-40bd-9502-a81d117ee6d2" (UID: "1d68a6cf-5a1b-40bd-9502-a81d117ee6d2"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.174412 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1d68a6cf-5a1b-40bd-9502-a81d117ee6d2-config" (OuterVolumeSpecName: "config") pod "1d68a6cf-5a1b-40bd-9502-a81d117ee6d2" (UID: "1d68a6cf-5a1b-40bd-9502-a81d117ee6d2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.181391 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da283285-f622-4286-be22-0c3e4d244cd1-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "da283285-f622-4286-be22-0c3e4d244cd1" (UID: "da283285-f622-4286-be22-0c3e4d244cd1"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.181418 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d68a6cf-5a1b-40bd-9502-a81d117ee6d2-kube-api-access-flj2k" (OuterVolumeSpecName: "kube-api-access-flj2k") pod "1d68a6cf-5a1b-40bd-9502-a81d117ee6d2" (UID: "1d68a6cf-5a1b-40bd-9502-a81d117ee6d2"). InnerVolumeSpecName "kube-api-access-flj2k". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.182604 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da283285-f622-4286-be22-0c3e4d244cd1-kube-api-access-mbkfl" (OuterVolumeSpecName: "kube-api-access-mbkfl") pod "da283285-f622-4286-be22-0c3e4d244cd1" (UID: "da283285-f622-4286-be22-0c3e4d244cd1"). InnerVolumeSpecName "kube-api-access-mbkfl". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.183317 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d68a6cf-5a1b-40bd-9502-a81d117ee6d2-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1d68a6cf-5a1b-40bd-9502-a81d117ee6d2" (UID: "1d68a6cf-5a1b-40bd-9502-a81d117ee6d2"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.190237 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-dmqg7" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.274213 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-snkhn\" (UniqueName: \"kubernetes.io/projected/8c1ef8c9-54d7-4182-bd65-cdbf97bb6284-kube-api-access-snkhn\") pod \"route-controller-manager-74dc84cd6c-9769d\" (UID: \"8c1ef8c9-54d7-4182-bd65-cdbf97bb6284\") " pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.274308 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8c1ef8c9-54d7-4182-bd65-cdbf97bb6284-config\") pod \"route-controller-manager-74dc84cd6c-9769d\" (UID: \"8c1ef8c9-54d7-4182-bd65-cdbf97bb6284\") " pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.274337 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8c1ef8c9-54d7-4182-bd65-cdbf97bb6284-client-ca\") pod \"route-controller-manager-74dc84cd6c-9769d\" (UID: \"8c1ef8c9-54d7-4182-bd65-cdbf97bb6284\") " pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.274379 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8c1ef8c9-54d7-4182-bd65-cdbf97bb6284-serving-cert\") pod \"route-controller-manager-74dc84cd6c-9769d\" (UID: \"8c1ef8c9-54d7-4182-bd65-cdbf97bb6284\") " pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.274538 5048 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/da283285-f622-4286-be22-0c3e4d244cd1-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.274588 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-flj2k\" (UniqueName: \"kubernetes.io/projected/1d68a6cf-5a1b-40bd-9502-a81d117ee6d2-kube-api-access-flj2k\") on node \"crc\" DevicePath \"\"" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.274607 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1d68a6cf-5a1b-40bd-9502-a81d117ee6d2-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.274620 5048 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1d68a6cf-5a1b-40bd-9502-a81d117ee6d2-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.274634 5048 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1d68a6cf-5a1b-40bd-9502-a81d117ee6d2-client-ca\") on node \"crc\" DevicePath \"\"" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.274644 5048 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1d68a6cf-5a1b-40bd-9502-a81d117ee6d2-proxy-ca-bundles\") on node \"crc\" 
DevicePath \"\"" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.274656 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mbkfl\" (UniqueName: \"kubernetes.io/projected/da283285-f622-4286-be22-0c3e4d244cd1-kube-api-access-mbkfl\") on node \"crc\" DevicePath \"\"" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.274668 5048 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/da283285-f622-4286-be22-0c3e4d244cd1-client-ca\") on node \"crc\" DevicePath \"\"" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.274684 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da283285-f622-4286-be22-0c3e4d244cd1-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.275888 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8c1ef8c9-54d7-4182-bd65-cdbf97bb6284-client-ca\") pod \"route-controller-manager-74dc84cd6c-9769d\" (UID: \"8c1ef8c9-54d7-4182-bd65-cdbf97bb6284\") " pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.277095 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8c1ef8c9-54d7-4182-bd65-cdbf97bb6284-config\") pod \"route-controller-manager-74dc84cd6c-9769d\" (UID: \"8c1ef8c9-54d7-4182-bd65-cdbf97bb6284\") " pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.280030 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8c1ef8c9-54d7-4182-bd65-cdbf97bb6284-serving-cert\") pod \"route-controller-manager-74dc84cd6c-9769d\" (UID: \"8c1ef8c9-54d7-4182-bd65-cdbf97bb6284\") " pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.295142 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-snkhn\" (UniqueName: \"kubernetes.io/projected/8c1ef8c9-54d7-4182-bd65-cdbf97bb6284-kube-api-access-snkhn\") pod \"route-controller-manager-74dc84cd6c-9769d\" (UID: \"8c1ef8c9-54d7-4182-bd65-cdbf97bb6284\") " pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.360078 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hjls5"] Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.373294 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lqvmp"] Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.383494 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-q8ltl"] Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.390084 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vfwpt"] Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.401540 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-cwvrg"] Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.402906 5048 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-marketplace/marketplace-operator-79b997595-cwvrg" podUID="6111e5a8-1616-417d-a3d6-d7b1a39ec709" containerName="marketplace-operator" containerID="cri-o://b7148a268f666f5da3f9e1c5abb098bd39089e64356c5a1497cb5e28edb25d4d" gracePeriod=30 Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.411883 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dmqg7"] Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.422504 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rb7fl"] Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.423206 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.431801 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-zm7qx"] Dec 13 06:33:44 crc kubenswrapper[5048]: E1213 06:33:44.432309 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d68a6cf-5a1b-40bd-9502-a81d117ee6d2" containerName="controller-manager" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.432338 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d68a6cf-5a1b-40bd-9502-a81d117ee6d2" containerName="controller-manager" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.432556 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="1d68a6cf-5a1b-40bd-9502-a81d117ee6d2" containerName="controller-manager" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.433287 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.436095 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-k9jhn"] Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.436379 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-k9jhn" podUID="87790266-395a-4687-895f-f524c8b2171b" containerName="registry-server" containerID="cri-o://7ef4c810674b98a5a6f1511a55a423ec25e57323b26b6420cc673e440efd49dd" gracePeriod=30 Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.443531 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-l9l5s"] Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.465335 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-zm7qx"] Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.580584 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/60f5ae10-2f86-46f8-b613-f017b8753690-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-zm7qx\" (UID: \"60f5ae10-2f86-46f8-b613-f017b8753690\") " pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.580654 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/60f5ae10-2f86-46f8-b613-f017b8753690-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-zm7qx\" (UID: \"60f5ae10-2f86-46f8-b613-f017b8753690\") " 
pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.580879 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w4pql\" (UniqueName: \"kubernetes.io/projected/60f5ae10-2f86-46f8-b613-f017b8753690-kube-api-access-w4pql\") pod \"marketplace-operator-79b997595-zm7qx\" (UID: \"60f5ae10-2f86-46f8-b613-f017b8753690\") " pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.683167 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/60f5ae10-2f86-46f8-b613-f017b8753690-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-zm7qx\" (UID: \"60f5ae10-2f86-46f8-b613-f017b8753690\") " pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.683255 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/60f5ae10-2f86-46f8-b613-f017b8753690-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-zm7qx\" (UID: \"60f5ae10-2f86-46f8-b613-f017b8753690\") " pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.683279 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w4pql\" (UniqueName: \"kubernetes.io/projected/60f5ae10-2f86-46f8-b613-f017b8753690-kube-api-access-w4pql\") pod \"marketplace-operator-79b997595-zm7qx\" (UID: \"60f5ae10-2f86-46f8-b613-f017b8753690\") " pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.685401 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/60f5ae10-2f86-46f8-b613-f017b8753690-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-zm7qx\" (UID: \"60f5ae10-2f86-46f8-b613-f017b8753690\") " pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.687854 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/60f5ae10-2f86-46f8-b613-f017b8753690-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-zm7qx\" (UID: \"60f5ae10-2f86-46f8-b613-f017b8753690\") " pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.723150 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w4pql\" (UniqueName: \"kubernetes.io/projected/60f5ae10-2f86-46f8-b613-f017b8753690-kube-api-access-w4pql\") pod \"marketplace-operator-79b997595-zm7qx\" (UID: \"60f5ae10-2f86-46f8-b613-f017b8753690\") " pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.732901 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-99ff448dc-2ckpz" event={"ID":"da283285-f622-4286-be22-0c3e4d244cd1","Type":"ContainerDied","Data":"38ad9017e2f5ca54aa86a25f606ee16747224a84fa410050010831fccc0ba2ec"} Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.732953 5048 scope.go:117] "RemoveContainer" 
containerID="783949f7baae5c962cd7a7398449fc30d62cceb3ac4168ac75ca3d891359bca8" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.733014 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-99ff448dc-2ckpz" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.737151 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-d49cf5dcf-6dfqg" event={"ID":"1d68a6cf-5a1b-40bd-9502-a81d117ee6d2","Type":"ContainerDied","Data":"aaa7906dc8697f01ae735eec3eb3b0f2240f37b754391ac41fb26eed30ec9798"} Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.737468 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-d49cf5dcf-6dfqg" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.737497 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-dmqg7" podUID="16d5a4f5-03a1-46f1-990b-f69dc88f1e9d" containerName="registry-server" containerID="cri-o://a3d29aeb099a26394bc87634e55189778816c9653e97ff7352f2a9b8eb034bc6" gracePeriod=30 Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.758129 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.769588 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-99ff448dc-2ckpz"] Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.775668 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-99ff448dc-2ckpz"] Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.781271 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-d49cf5dcf-6dfqg"] Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.785361 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-d49cf5dcf-6dfqg"] Dec 13 06:33:44 crc kubenswrapper[5048]: I1213 06:33:44.910178 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-k9jhn" Dec 13 06:33:45 crc kubenswrapper[5048]: I1213 06:33:45.746169 5048 generic.go:334] "Generic (PLEG): container finished" podID="6111e5a8-1616-417d-a3d6-d7b1a39ec709" containerID="b7148a268f666f5da3f9e1c5abb098bd39089e64356c5a1497cb5e28edb25d4d" exitCode=0 Dec 13 06:33:45 crc kubenswrapper[5048]: I1213 06:33:45.746230 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-cwvrg" event={"ID":"6111e5a8-1616-417d-a3d6-d7b1a39ec709","Type":"ContainerDied","Data":"b7148a268f666f5da3f9e1c5abb098bd39089e64356c5a1497cb5e28edb25d4d"} Dec 13 06:33:46 crc kubenswrapper[5048]: I1213 06:33:46.538069 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-5d89566846-prjbf"] Dec 13 06:33:46 crc kubenswrapper[5048]: I1213 06:33:46.538917 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5d89566846-prjbf" Dec 13 06:33:46 crc kubenswrapper[5048]: I1213 06:33:46.541926 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Dec 13 06:33:46 crc kubenswrapper[5048]: I1213 06:33:46.542191 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Dec 13 06:33:46 crc kubenswrapper[5048]: I1213 06:33:46.542540 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Dec 13 06:33:46 crc kubenswrapper[5048]: I1213 06:33:46.543607 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Dec 13 06:33:46 crc kubenswrapper[5048]: I1213 06:33:46.543911 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Dec 13 06:33:46 crc kubenswrapper[5048]: I1213 06:33:46.545713 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Dec 13 06:33:46 crc kubenswrapper[5048]: I1213 06:33:46.556823 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Dec 13 06:33:46 crc kubenswrapper[5048]: I1213 06:33:46.557019 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5d89566846-prjbf"] Dec 13 06:33:46 crc kubenswrapper[5048]: I1213 06:33:46.581751 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d68a6cf-5a1b-40bd-9502-a81d117ee6d2" path="/var/lib/kubelet/pods/1d68a6cf-5a1b-40bd-9502-a81d117ee6d2/volumes" Dec 13 06:33:46 crc kubenswrapper[5048]: I1213 06:33:46.582534 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da283285-f622-4286-be22-0c3e4d244cd1" path="/var/lib/kubelet/pods/da283285-f622-4286-be22-0c3e4d244cd1/volumes" Dec 13 06:33:46 crc kubenswrapper[5048]: I1213 06:33:46.595715 5048 scope.go:117] "RemoveContainer" containerID="cd6bb8f71ef92393849a250d9abd020f5b6eedc4d1f76504f64226265aabf72e" Dec 13 06:33:46 crc kubenswrapper[5048]: I1213 06:33:46.616267 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/27b96e48-77ff-4f71-a919-9f30814704a7-serving-cert\") pod \"controller-manager-5d89566846-prjbf\" (UID: \"27b96e48-77ff-4f71-a919-9f30814704a7\") " pod="openshift-controller-manager/controller-manager-5d89566846-prjbf" Dec 13 06:33:46 crc kubenswrapper[5048]: I1213 06:33:46.616492 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/27b96e48-77ff-4f71-a919-9f30814704a7-client-ca\") pod \"controller-manager-5d89566846-prjbf\" (UID: \"27b96e48-77ff-4f71-a919-9f30814704a7\") " pod="openshift-controller-manager/controller-manager-5d89566846-prjbf" Dec 13 06:33:46 crc kubenswrapper[5048]: I1213 06:33:46.616671 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/27b96e48-77ff-4f71-a919-9f30814704a7-proxy-ca-bundles\") pod \"controller-manager-5d89566846-prjbf\" (UID: \"27b96e48-77ff-4f71-a919-9f30814704a7\") " 
pod="openshift-controller-manager/controller-manager-5d89566846-prjbf" Dec 13 06:33:46 crc kubenswrapper[5048]: I1213 06:33:46.616787 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ggczv\" (UniqueName: \"kubernetes.io/projected/27b96e48-77ff-4f71-a919-9f30814704a7-kube-api-access-ggczv\") pod \"controller-manager-5d89566846-prjbf\" (UID: \"27b96e48-77ff-4f71-a919-9f30814704a7\") " pod="openshift-controller-manager/controller-manager-5d89566846-prjbf" Dec 13 06:33:46 crc kubenswrapper[5048]: I1213 06:33:46.616902 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27b96e48-77ff-4f71-a919-9f30814704a7-config\") pod \"controller-manager-5d89566846-prjbf\" (UID: \"27b96e48-77ff-4f71-a919-9f30814704a7\") " pod="openshift-controller-manager/controller-manager-5d89566846-prjbf" Dec 13 06:33:46 crc kubenswrapper[5048]: I1213 06:33:46.718713 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/27b96e48-77ff-4f71-a919-9f30814704a7-proxy-ca-bundles\") pod \"controller-manager-5d89566846-prjbf\" (UID: \"27b96e48-77ff-4f71-a919-9f30814704a7\") " pod="openshift-controller-manager/controller-manager-5d89566846-prjbf" Dec 13 06:33:46 crc kubenswrapper[5048]: I1213 06:33:46.719815 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/27b96e48-77ff-4f71-a919-9f30814704a7-proxy-ca-bundles\") pod \"controller-manager-5d89566846-prjbf\" (UID: \"27b96e48-77ff-4f71-a919-9f30814704a7\") " pod="openshift-controller-manager/controller-manager-5d89566846-prjbf" Dec 13 06:33:46 crc kubenswrapper[5048]: I1213 06:33:46.719272 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ggczv\" (UniqueName: \"kubernetes.io/projected/27b96e48-77ff-4f71-a919-9f30814704a7-kube-api-access-ggczv\") pod \"controller-manager-5d89566846-prjbf\" (UID: \"27b96e48-77ff-4f71-a919-9f30814704a7\") " pod="openshift-controller-manager/controller-manager-5d89566846-prjbf" Dec 13 06:33:46 crc kubenswrapper[5048]: I1213 06:33:46.722075 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27b96e48-77ff-4f71-a919-9f30814704a7-config\") pod \"controller-manager-5d89566846-prjbf\" (UID: \"27b96e48-77ff-4f71-a919-9f30814704a7\") " pod="openshift-controller-manager/controller-manager-5d89566846-prjbf" Dec 13 06:33:46 crc kubenswrapper[5048]: I1213 06:33:46.722288 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/27b96e48-77ff-4f71-a919-9f30814704a7-serving-cert\") pod \"controller-manager-5d89566846-prjbf\" (UID: \"27b96e48-77ff-4f71-a919-9f30814704a7\") " pod="openshift-controller-manager/controller-manager-5d89566846-prjbf" Dec 13 06:33:46 crc kubenswrapper[5048]: I1213 06:33:46.722326 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/27b96e48-77ff-4f71-a919-9f30814704a7-client-ca\") pod \"controller-manager-5d89566846-prjbf\" (UID: \"27b96e48-77ff-4f71-a919-9f30814704a7\") " pod="openshift-controller-manager/controller-manager-5d89566846-prjbf" Dec 13 06:33:46 crc kubenswrapper[5048]: I1213 06:33:46.723264 5048 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27b96e48-77ff-4f71-a919-9f30814704a7-config\") pod \"controller-manager-5d89566846-prjbf\" (UID: \"27b96e48-77ff-4f71-a919-9f30814704a7\") " pod="openshift-controller-manager/controller-manager-5d89566846-prjbf" Dec 13 06:33:46 crc kubenswrapper[5048]: I1213 06:33:46.724179 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/27b96e48-77ff-4f71-a919-9f30814704a7-client-ca\") pod \"controller-manager-5d89566846-prjbf\" (UID: \"27b96e48-77ff-4f71-a919-9f30814704a7\") " pod="openshift-controller-manager/controller-manager-5d89566846-prjbf" Dec 13 06:33:46 crc kubenswrapper[5048]: I1213 06:33:46.730248 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/27b96e48-77ff-4f71-a919-9f30814704a7-serving-cert\") pod \"controller-manager-5d89566846-prjbf\" (UID: \"27b96e48-77ff-4f71-a919-9f30814704a7\") " pod="openshift-controller-manager/controller-manager-5d89566846-prjbf" Dec 13 06:33:46 crc kubenswrapper[5048]: I1213 06:33:46.746632 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ggczv\" (UniqueName: \"kubernetes.io/projected/27b96e48-77ff-4f71-a919-9f30814704a7-kube-api-access-ggczv\") pod \"controller-manager-5d89566846-prjbf\" (UID: \"27b96e48-77ff-4f71-a919-9f30814704a7\") " pod="openshift-controller-manager/controller-manager-5d89566846-prjbf" Dec 13 06:33:46 crc kubenswrapper[5048]: I1213 06:33:46.762652 5048 generic.go:334] "Generic (PLEG): container finished" podID="16d5a4f5-03a1-46f1-990b-f69dc88f1e9d" containerID="a3d29aeb099a26394bc87634e55189778816c9653e97ff7352f2a9b8eb034bc6" exitCode=0 Dec 13 06:33:46 crc kubenswrapper[5048]: I1213 06:33:46.762742 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dmqg7" event={"ID":"16d5a4f5-03a1-46f1-990b-f69dc88f1e9d","Type":"ContainerDied","Data":"a3d29aeb099a26394bc87634e55189778816c9653e97ff7352f2a9b8eb034bc6"} Dec 13 06:33:46 crc kubenswrapper[5048]: I1213 06:33:46.765987 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-k9jhn_87790266-395a-4687-895f-f524c8b2171b/registry-server/0.log" Dec 13 06:33:46 crc kubenswrapper[5048]: I1213 06:33:46.766753 5048 generic.go:334] "Generic (PLEG): container finished" podID="87790266-395a-4687-895f-f524c8b2171b" containerID="7ef4c810674b98a5a6f1511a55a423ec25e57323b26b6420cc673e440efd49dd" exitCode=1 Dec 13 06:33:46 crc kubenswrapper[5048]: I1213 06:33:46.766836 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k9jhn" event={"ID":"87790266-395a-4687-895f-f524c8b2171b","Type":"ContainerDied","Data":"7ef4c810674b98a5a6f1511a55a423ec25e57323b26b6420cc673e440efd49dd"} Dec 13 06:33:46 crc kubenswrapper[5048]: I1213 06:33:46.875089 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5d89566846-prjbf" Dec 13 06:33:46 crc kubenswrapper[5048]: I1213 06:33:46.893713 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d"] Dec 13 06:33:47 crc kubenswrapper[5048]: I1213 06:33:47.055515 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-zm7qx"] Dec 13 06:33:47 crc kubenswrapper[5048]: W1213 06:33:47.065834 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod60f5ae10_2f86_46f8_b613_f017b8753690.slice/crio-3776913a8f666e1ed1870c5139083241bfef44edfe143bdc0fd30fcc355f8f57 WatchSource:0}: Error finding container 3776913a8f666e1ed1870c5139083241bfef44edfe143bdc0fd30fcc355f8f57: Status 404 returned error can't find the container with id 3776913a8f666e1ed1870c5139083241bfef44edfe143bdc0fd30fcc355f8f57 Dec 13 06:33:47 crc kubenswrapper[5048]: I1213 06:33:47.776459 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" event={"ID":"60f5ae10-2f86-46f8-b613-f017b8753690","Type":"ContainerStarted","Data":"3776913a8f666e1ed1870c5139083241bfef44edfe143bdc0fd30fcc355f8f57"} Dec 13 06:33:47 crc kubenswrapper[5048]: I1213 06:33:47.778317 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" event={"ID":"8c1ef8c9-54d7-4182-bd65-cdbf97bb6284","Type":"ContainerStarted","Data":"cf473a60a9b3ac2739bdb4ff8fbd3a8dd4f6ce06b35cd790a919dd87e58893fc"} Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.058810 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dmqg7" Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.143793 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16d5a4f5-03a1-46f1-990b-f69dc88f1e9d-utilities\") pod \"16d5a4f5-03a1-46f1-990b-f69dc88f1e9d\" (UID: \"16d5a4f5-03a1-46f1-990b-f69dc88f1e9d\") " Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.143982 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4c4wp\" (UniqueName: \"kubernetes.io/projected/16d5a4f5-03a1-46f1-990b-f69dc88f1e9d-kube-api-access-4c4wp\") pod \"16d5a4f5-03a1-46f1-990b-f69dc88f1e9d\" (UID: \"16d5a4f5-03a1-46f1-990b-f69dc88f1e9d\") " Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.144018 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16d5a4f5-03a1-46f1-990b-f69dc88f1e9d-catalog-content\") pod \"16d5a4f5-03a1-46f1-990b-f69dc88f1e9d\" (UID: \"16d5a4f5-03a1-46f1-990b-f69dc88f1e9d\") " Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.145887 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/16d5a4f5-03a1-46f1-990b-f69dc88f1e9d-utilities" (OuterVolumeSpecName: "utilities") pod "16d5a4f5-03a1-46f1-990b-f69dc88f1e9d" (UID: "16d5a4f5-03a1-46f1-990b-f69dc88f1e9d"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.149184 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/16d5a4f5-03a1-46f1-990b-f69dc88f1e9d-kube-api-access-4c4wp" (OuterVolumeSpecName: "kube-api-access-4c4wp") pod "16d5a4f5-03a1-46f1-990b-f69dc88f1e9d" (UID: "16d5a4f5-03a1-46f1-990b-f69dc88f1e9d"). InnerVolumeSpecName "kube-api-access-4c4wp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.173259 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/16d5a4f5-03a1-46f1-990b-f69dc88f1e9d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "16d5a4f5-03a1-46f1-990b-f69dc88f1e9d" (UID: "16d5a4f5-03a1-46f1-990b-f69dc88f1e9d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.246317 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4c4wp\" (UniqueName: \"kubernetes.io/projected/16d5a4f5-03a1-46f1-990b-f69dc88f1e9d-kube-api-access-4c4wp\") on node \"crc\" DevicePath \"\"" Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.246367 5048 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16d5a4f5-03a1-46f1-990b-f69dc88f1e9d-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.246383 5048 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16d5a4f5-03a1-46f1-990b-f69dc88f1e9d-utilities\") on node \"crc\" DevicePath \"\"" Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.705504 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-cwvrg" Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.754056 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6111e5a8-1616-417d-a3d6-d7b1a39ec709-marketplace-trusted-ca\") pod \"6111e5a8-1616-417d-a3d6-d7b1a39ec709\" (UID: \"6111e5a8-1616-417d-a3d6-d7b1a39ec709\") " Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.754115 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9mfsl\" (UniqueName: \"kubernetes.io/projected/6111e5a8-1616-417d-a3d6-d7b1a39ec709-kube-api-access-9mfsl\") pod \"6111e5a8-1616-417d-a3d6-d7b1a39ec709\" (UID: \"6111e5a8-1616-417d-a3d6-d7b1a39ec709\") " Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.754396 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6111e5a8-1616-417d-a3d6-d7b1a39ec709-marketplace-operator-metrics\") pod \"6111e5a8-1616-417d-a3d6-d7b1a39ec709\" (UID: \"6111e5a8-1616-417d-a3d6-d7b1a39ec709\") " Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.755990 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6111e5a8-1616-417d-a3d6-d7b1a39ec709-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "6111e5a8-1616-417d-a3d6-d7b1a39ec709" (UID: "6111e5a8-1616-417d-a3d6-d7b1a39ec709"). InnerVolumeSpecName "marketplace-trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.759367 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6111e5a8-1616-417d-a3d6-d7b1a39ec709-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "6111e5a8-1616-417d-a3d6-d7b1a39ec709" (UID: "6111e5a8-1616-417d-a3d6-d7b1a39ec709"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.759998 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6111e5a8-1616-417d-a3d6-d7b1a39ec709-kube-api-access-9mfsl" (OuterVolumeSpecName: "kube-api-access-9mfsl") pod "6111e5a8-1616-417d-a3d6-d7b1a39ec709" (UID: "6111e5a8-1616-417d-a3d6-d7b1a39ec709"). InnerVolumeSpecName "kube-api-access-9mfsl". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.790404 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-k9jhn_87790266-395a-4687-895f-f524c8b2171b/registry-server/0.log" Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.791588 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k9jhn" event={"ID":"87790266-395a-4687-895f-f524c8b2171b","Type":"ContainerDied","Data":"016d7a69702908f4c58036c466ae7bbd4d4c6313aa0f9fc8462e7e46b54d26de"} Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.791635 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="016d7a69702908f4c58036c466ae7bbd4d4c6313aa0f9fc8462e7e46b54d26de" Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.794335 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-cwvrg" event={"ID":"6111e5a8-1616-417d-a3d6-d7b1a39ec709","Type":"ContainerDied","Data":"36d36ffee34eb72a42c3be3d5ad7496d80960581f5fece4280371c17bc4e457a"} Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.794380 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-cwvrg" Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.794426 5048 scope.go:117] "RemoveContainer" containerID="b7148a268f666f5da3f9e1c5abb098bd39089e64356c5a1497cb5e28edb25d4d" Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.796030 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-k9jhn_87790266-395a-4687-895f-f524c8b2171b/registry-server/0.log" Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.806863 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-k9jhn" Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.807427 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dmqg7" event={"ID":"16d5a4f5-03a1-46f1-990b-f69dc88f1e9d","Type":"ContainerDied","Data":"a14133a8445df7195097fd3577b4e03687aabc9ec38c4e4f4e0fbab02ec7a3a9"} Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.807591 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dmqg7" Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.855453 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87790266-395a-4687-895f-f524c8b2171b-utilities\") pod \"87790266-395a-4687-895f-f524c8b2171b\" (UID: \"87790266-395a-4687-895f-f524c8b2171b\") " Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.855530 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-895rz\" (UniqueName: \"kubernetes.io/projected/87790266-395a-4687-895f-f524c8b2171b-kube-api-access-895rz\") pod \"87790266-395a-4687-895f-f524c8b2171b\" (UID: \"87790266-395a-4687-895f-f524c8b2171b\") " Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.855654 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87790266-395a-4687-895f-f524c8b2171b-catalog-content\") pod \"87790266-395a-4687-895f-f524c8b2171b\" (UID: \"87790266-395a-4687-895f-f524c8b2171b\") " Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.855988 5048 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6111e5a8-1616-417d-a3d6-d7b1a39ec709-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.856012 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9mfsl\" (UniqueName: \"kubernetes.io/projected/6111e5a8-1616-417d-a3d6-d7b1a39ec709-kube-api-access-9mfsl\") on node \"crc\" DevicePath \"\"" Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.856024 5048 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6111e5a8-1616-417d-a3d6-d7b1a39ec709-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.857003 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dmqg7"] Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.861267 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/87790266-395a-4687-895f-f524c8b2171b-utilities" (OuterVolumeSpecName: "utilities") pod "87790266-395a-4687-895f-f524c8b2171b" (UID: "87790266-395a-4687-895f-f524c8b2171b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.884850 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-dmqg7"] Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.887952 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87790266-395a-4687-895f-f524c8b2171b-kube-api-access-895rz" (OuterVolumeSpecName: "kube-api-access-895rz") pod "87790266-395a-4687-895f-f524c8b2171b" (UID: "87790266-395a-4687-895f-f524c8b2171b"). InnerVolumeSpecName "kube-api-access-895rz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.891563 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-cwvrg"] Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.895922 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-cwvrg"] Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.958120 5048 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87790266-395a-4687-895f-f524c8b2171b-utilities\") on node \"crc\" DevicePath \"\"" Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.958166 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-895rz\" (UniqueName: \"kubernetes.io/projected/87790266-395a-4687-895f-f524c8b2171b-kube-api-access-895rz\") on node \"crc\" DevicePath \"\"" Dec 13 06:33:48 crc kubenswrapper[5048]: I1213 06:33:48.986347 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/87790266-395a-4687-895f-f524c8b2171b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "87790266-395a-4687-895f-f524c8b2171b" (UID: "87790266-395a-4687-895f-f524c8b2171b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:33:49 crc kubenswrapper[5048]: I1213 06:33:49.059623 5048 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87790266-395a-4687-895f-f524c8b2171b-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 13 06:33:49 crc kubenswrapper[5048]: I1213 06:33:49.814029 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-k9jhn" Dec 13 06:33:49 crc kubenswrapper[5048]: I1213 06:33:49.842934 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-k9jhn"] Dec 13 06:33:49 crc kubenswrapper[5048]: I1213 06:33:49.848404 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-k9jhn"] Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.012028 5048 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Dec 13 06:33:50 crc kubenswrapper[5048]: E1213 06:33:50.012361 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16d5a4f5-03a1-46f1-990b-f69dc88f1e9d" containerName="extract-content" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.012382 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="16d5a4f5-03a1-46f1-990b-f69dc88f1e9d" containerName="extract-content" Dec 13 06:33:50 crc kubenswrapper[5048]: E1213 06:33:50.012399 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87790266-395a-4687-895f-f524c8b2171b" containerName="extract-content" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.012412 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="87790266-395a-4687-895f-f524c8b2171b" containerName="extract-content" Dec 13 06:33:50 crc kubenswrapper[5048]: E1213 06:33:50.012547 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16d5a4f5-03a1-46f1-990b-f69dc88f1e9d" containerName="registry-server" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.012557 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="16d5a4f5-03a1-46f1-990b-f69dc88f1e9d" containerName="registry-server" Dec 13 06:33:50 crc kubenswrapper[5048]: E1213 06:33:50.012568 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16d5a4f5-03a1-46f1-990b-f69dc88f1e9d" containerName="extract-utilities" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.012574 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="16d5a4f5-03a1-46f1-990b-f69dc88f1e9d" containerName="extract-utilities" Dec 13 06:33:50 crc kubenswrapper[5048]: E1213 06:33:50.012588 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87790266-395a-4687-895f-f524c8b2171b" containerName="extract-utilities" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.012595 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="87790266-395a-4687-895f-f524c8b2171b" containerName="extract-utilities" Dec 13 06:33:50 crc kubenswrapper[5048]: E1213 06:33:50.012605 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6111e5a8-1616-417d-a3d6-d7b1a39ec709" containerName="marketplace-operator" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.012611 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="6111e5a8-1616-417d-a3d6-d7b1a39ec709" containerName="marketplace-operator" Dec 13 06:33:50 crc kubenswrapper[5048]: E1213 06:33:50.012620 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87790266-395a-4687-895f-f524c8b2171b" containerName="registry-server" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.012626 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="87790266-395a-4687-895f-f524c8b2171b" containerName="registry-server" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.012759 5048 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="16d5a4f5-03a1-46f1-990b-f69dc88f1e9d" containerName="registry-server" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.012776 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="87790266-395a-4687-895f-f524c8b2171b" containerName="registry-server" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.012786 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="6111e5a8-1616-417d-a3d6-d7b1a39ec709" containerName="marketplace-operator" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.013284 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.016728 5048 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.016787 5048 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Dec 13 06:33:50 crc kubenswrapper[5048]: E1213 06:33:50.017001 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.017024 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Dec 13 06:33:50 crc kubenswrapper[5048]: E1213 06:33:50.017034 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.017042 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 13 06:33:50 crc kubenswrapper[5048]: E1213 06:33:50.017054 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.017062 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Dec 13 06:33:50 crc kubenswrapper[5048]: E1213 06:33:50.017075 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.017082 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Dec 13 06:33:50 crc kubenswrapper[5048]: E1213 06:33:50.017094 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.017101 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Dec 13 06:33:50 crc kubenswrapper[5048]: E1213 06:33:50.017114 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.017121 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.017257 5048 
memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.017270 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.017283 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.017293 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.017303 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.017314 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Dec 13 06:33:50 crc kubenswrapper[5048]: E1213 06:33:50.017426 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.017453 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.019016 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8" gracePeriod=15 Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.019246 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d" gracePeriod=15 Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.019319 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837" gracePeriod=15 Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.019362 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea" gracePeriod=15 Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.019402 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551" gracePeriod=15 Dec 13 06:33:50 crc 
kubenswrapper[5048]: I1213 06:33:50.056555 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.075270 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.075671 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.075721 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.075746 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.075767 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.075847 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.075877 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.076175 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.176990 5048 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.177119 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.177077 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.177202 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.177222 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.177301 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.177325 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.177356 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.177256 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.177384 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: 
\"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.177275 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.177340 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.177466 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.177518 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.177628 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.177651 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.347887 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.575618 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="16d5a4f5-03a1-46f1-990b-f69dc88f1e9d" path="/var/lib/kubelet/pods/16d5a4f5-03a1-46f1-990b-f69dc88f1e9d/volumes" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.576586 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6111e5a8-1616-417d-a3d6-d7b1a39ec709" path="/var/lib/kubelet/pods/6111e5a8-1616-417d-a3d6-d7b1a39ec709/volumes" Dec 13 06:33:50 crc kubenswrapper[5048]: I1213 06:33:50.577145 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87790266-395a-4687-895f-f524c8b2171b" path="/var/lib/kubelet/pods/87790266-395a-4687-895f-f524c8b2171b/volumes" Dec 13 06:33:51 crc kubenswrapper[5048]: I1213 06:33:51.831984 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Dec 13 06:33:51 crc kubenswrapper[5048]: I1213 06:33:51.834160 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 13 06:33:51 crc kubenswrapper[5048]: I1213 06:33:51.835051 5048 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551" exitCode=2 Dec 13 06:33:52 crc kubenswrapper[5048]: I1213 06:33:52.454758 5048 scope.go:117] "RemoveContainer" containerID="a3d29aeb099a26394bc87634e55189778816c9653e97ff7352f2a9b8eb034bc6" Dec 13 06:33:52 crc kubenswrapper[5048]: E1213 06:33:52.455960 5048 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-route-controller-manager/events\": dial tcp 38.102.83.251:6443: connect: connection refused" event="&Event{ObjectMeta:{route-controller-manager-74dc84cd6c-9769d.1880b2c98131a568 openshift-route-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-route-controller-manager,Name:route-controller-manager-74dc84cd6c-9769d,UID:8c1ef8c9-54d7-4182-bd65-cdbf97bb6284,APIVersion:v1,ResourceVersion:29733,FieldPath:spec.containers{route-controller-manager},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-13 06:33:52.454919528 +0000 UTC m=+266.321514109,LastTimestamp:2025-12-13 06:33:52.454919528 +0000 UTC m=+266.321514109,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Dec 13 06:33:52 crc kubenswrapper[5048]: I1213 06:33:52.842124 5048 generic.go:334] "Generic (PLEG): container finished" podID="268d907d-1730-456e-8e52-67e58aca607b" containerID="c8f1f04a596aad23301a369c0d4f27bff3633c4555e8593f50e39f6cc5ac30b6" exitCode=0 Dec 13 06:33:52 crc kubenswrapper[5048]: I1213 06:33:52.842494 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" 
event={"ID":"268d907d-1730-456e-8e52-67e58aca607b","Type":"ContainerDied","Data":"c8f1f04a596aad23301a369c0d4f27bff3633c4555e8593f50e39f6cc5ac30b6"} Dec 13 06:33:52 crc kubenswrapper[5048]: I1213 06:33:52.843203 5048 status_manager.go:851] "Failed to get status for pod" podUID="268d907d-1730-456e-8e52-67e58aca607b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:52 crc kubenswrapper[5048]: I1213 06:33:52.843453 5048 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:52 crc kubenswrapper[5048]: I1213 06:33:52.845511 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Dec 13 06:33:52 crc kubenswrapper[5048]: I1213 06:33:52.846736 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 13 06:33:52 crc kubenswrapper[5048]: I1213 06:33:52.847378 5048 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d" exitCode=0 Dec 13 06:33:52 crc kubenswrapper[5048]: I1213 06:33:52.847410 5048 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837" exitCode=0 Dec 13 06:33:52 crc kubenswrapper[5048]: I1213 06:33:52.847421 5048 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea" exitCode=0 Dec 13 06:33:53 crc kubenswrapper[5048]: I1213 06:33:53.719577 5048 scope.go:117] "RemoveContainer" containerID="6b0387b7aaed67a4ce5b15785056b60089732db6331d2d15cc45f189d4b7500e" Dec 13 06:33:53 crc kubenswrapper[5048]: I1213 06:33:53.791079 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Dec 13 06:33:53 crc kubenswrapper[5048]: I1213 06:33:53.792625 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 13 06:33:53 crc kubenswrapper[5048]: I1213 06:33:53.793575 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 13 06:33:53 crc kubenswrapper[5048]: I1213 06:33:53.794041 5048 status_manager.go:851] "Failed to get status for pod" podUID="268d907d-1730-456e-8e52-67e58aca607b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:53 crc kubenswrapper[5048]: I1213 06:33:53.794666 5048 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:53 crc kubenswrapper[5048]: I1213 06:33:53.795026 5048 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:53 crc kubenswrapper[5048]: I1213 06:33:53.831749 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Dec 13 06:33:53 crc kubenswrapper[5048]: I1213 06:33:53.831805 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Dec 13 06:33:53 crc kubenswrapper[5048]: I1213 06:33:53.831894 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Dec 13 06:33:53 crc kubenswrapper[5048]: I1213 06:33:53.832353 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 13 06:33:53 crc kubenswrapper[5048]: I1213 06:33:53.832379 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 13 06:33:53 crc kubenswrapper[5048]: I1213 06:33:53.832395 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 13 06:33:53 crc kubenswrapper[5048]: I1213 06:33:53.866373 5048 scope.go:117] "RemoveContainer" containerID="2f40cb1d2d9dfa68a91e9a84c8f3d224e3f1d8b8029a5af32b5d7cf36276fe07" Dec 13 06:33:53 crc kubenswrapper[5048]: I1213 06:33:53.875585 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Dec 13 06:33:53 crc kubenswrapper[5048]: I1213 06:33:53.877310 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 13 06:33:53 crc kubenswrapper[5048]: I1213 06:33:53.878103 5048 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8" exitCode=0 Dec 13 06:33:53 crc kubenswrapper[5048]: I1213 06:33:53.878491 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 13 06:33:53 crc kubenswrapper[5048]: I1213 06:33:53.897718 5048 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:53 crc kubenswrapper[5048]: I1213 06:33:53.898872 5048 status_manager.go:851] "Failed to get status for pod" podUID="268d907d-1730-456e-8e52-67e58aca607b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:53 crc kubenswrapper[5048]: I1213 06:33:53.899277 5048 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:53 crc kubenswrapper[5048]: I1213 06:33:53.935615 5048 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Dec 13 06:33:53 crc kubenswrapper[5048]: I1213 06:33:53.935648 5048 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Dec 13 06:33:53 crc kubenswrapper[5048]: I1213 06:33:53.935663 5048 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Dec 13 06:33:53 crc kubenswrapper[5048]: I1213 06:33:53.948110 5048 scope.go:117] "RemoveContainer" containerID="7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.090065 5048 scope.go:117] "RemoveContainer" containerID="0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.133703 5048 
scope.go:117] "RemoveContainer" containerID="7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f" Dec 13 06:33:54 crc kubenswrapper[5048]: E1213 06:33:54.134132 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\": container with ID starting with 7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f not found: ID does not exist" containerID="7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.134159 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f"} err="failed to get container status \"7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\": rpc error: code = NotFound desc = could not find container \"7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\": container with ID starting with 7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f not found: ID does not exist" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.134181 5048 scope.go:117] "RemoveContainer" containerID="e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.166269 5048 scope.go:117] "RemoveContainer" containerID="c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.187422 5048 scope.go:117] "RemoveContainer" containerID="ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.245956 5048 scope.go:117] "RemoveContainer" containerID="86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.276664 5048 scope.go:117] "RemoveContainer" containerID="73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.308710 5048 scope.go:117] "RemoveContainer" containerID="0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d" Dec 13 06:33:54 crc kubenswrapper[5048]: E1213 06:33:54.309074 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\": container with ID starting with 0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d not found: ID does not exist" containerID="0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.309118 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d"} err="failed to get container status \"0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\": rpc error: code = NotFound desc = could not find container \"0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d\": container with ID starting with 0e35bcaf71dbca722798af1f992bfd10d613c10c049b17c84cb6dc3e9f12424d not found: ID does not exist" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.309157 5048 scope.go:117] "RemoveContainer" containerID="7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.310774 5048 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f"} err="failed to get container status \"7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\": rpc error: code = NotFound desc = could not find container \"7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f\": container with ID starting with 7b60ca09494e17a8ecb98929dc47487e146b15fbf4ace81bc6f7d9206bb75b9f not found: ID does not exist" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.310833 5048 scope.go:117] "RemoveContainer" containerID="e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837" Dec 13 06:33:54 crc kubenswrapper[5048]: E1213 06:33:54.311241 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\": container with ID starting with e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837 not found: ID does not exist" containerID="e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.311263 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837"} err="failed to get container status \"e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\": rpc error: code = NotFound desc = could not find container \"e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837\": container with ID starting with e5a4b922a23a52f7631fa8630772ca36be0537ad5e7bbfcf6c9666dc094b0837 not found: ID does not exist" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.311276 5048 scope.go:117] "RemoveContainer" containerID="c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea" Dec 13 06:33:54 crc kubenswrapper[5048]: E1213 06:33:54.311551 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\": container with ID starting with c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea not found: ID does not exist" containerID="c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.311585 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea"} err="failed to get container status \"c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\": rpc error: code = NotFound desc = could not find container \"c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea\": container with ID starting with c569a136d119a567198dbb0f084bbb1f451aa4a12ab97559bb38e82812e931ea not found: ID does not exist" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.311603 5048 scope.go:117] "RemoveContainer" containerID="ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551" Dec 13 06:33:54 crc kubenswrapper[5048]: E1213 06:33:54.312231 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\": container with ID starting with ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551 not found: ID does 
not exist" containerID="ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.312254 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551"} err="failed to get container status \"ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\": rpc error: code = NotFound desc = could not find container \"ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551\": container with ID starting with ba2492daecc1cce30606d72bcefa92b3c5b1f820aaff52cdfc8a4616c5cce551 not found: ID does not exist" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.312267 5048 scope.go:117] "RemoveContainer" containerID="86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8" Dec 13 06:33:54 crc kubenswrapper[5048]: E1213 06:33:54.312667 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\": container with ID starting with 86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8 not found: ID does not exist" containerID="86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.312688 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8"} err="failed to get container status \"86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\": rpc error: code = NotFound desc = could not find container \"86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8\": container with ID starting with 86693a301f7d6e4ed49995e856a253578ff7fec23ee4946ced5c3a4faf2a29d8 not found: ID does not exist" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.312701 5048 scope.go:117] "RemoveContainer" containerID="73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72" Dec 13 06:33:54 crc kubenswrapper[5048]: E1213 06:33:54.312969 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\": container with ID starting with 73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72 not found: ID does not exist" containerID="73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.312988 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72"} err="failed to get container status \"73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\": rpc error: code = NotFound desc = could not find container \"73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72\": container with ID starting with 73a8578eb94d7557d7a45fa58013a97f7c76fa64b4727f49c138d081aa98de72 not found: ID does not exist" Dec 13 06:33:54 crc kubenswrapper[5048]: E1213 06:33:54.351086 5048 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:54 crc kubenswrapper[5048]: E1213 06:33:54.352068 5048 controller.go:195] "Failed to 
update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:54 crc kubenswrapper[5048]: E1213 06:33:54.352378 5048 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:54 crc kubenswrapper[5048]: E1213 06:33:54.353418 5048 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:54 crc kubenswrapper[5048]: E1213 06:33:54.353714 5048 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.353748 5048 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Dec 13 06:33:54 crc kubenswrapper[5048]: E1213 06:33:54.354014 5048 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.251:6443: connect: connection refused" interval="200ms" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.432250 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.433049 5048 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.433333 5048 status_manager.go:851] "Failed to get status for pod" podUID="268d907d-1730-456e-8e52-67e58aca607b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.433837 5048 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.545693 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/268d907d-1730-456e-8e52-67e58aca607b-var-lock\") pod \"268d907d-1730-456e-8e52-67e58aca607b\" (UID: \"268d907d-1730-456e-8e52-67e58aca607b\") " Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.546119 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/268d907d-1730-456e-8e52-67e58aca607b-var-lock" 
(OuterVolumeSpecName: "var-lock") pod "268d907d-1730-456e-8e52-67e58aca607b" (UID: "268d907d-1730-456e-8e52-67e58aca607b"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.545823 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/268d907d-1730-456e-8e52-67e58aca607b-kubelet-dir\") pod \"268d907d-1730-456e-8e52-67e58aca607b\" (UID: \"268d907d-1730-456e-8e52-67e58aca607b\") " Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.546279 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/268d907d-1730-456e-8e52-67e58aca607b-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "268d907d-1730-456e-8e52-67e58aca607b" (UID: "268d907d-1730-456e-8e52-67e58aca607b"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.546311 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/268d907d-1730-456e-8e52-67e58aca607b-kube-api-access\") pod \"268d907d-1730-456e-8e52-67e58aca607b\" (UID: \"268d907d-1730-456e-8e52-67e58aca607b\") " Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.547742 5048 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/268d907d-1730-456e-8e52-67e58aca607b-var-lock\") on node \"crc\" DevicePath \"\"" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.547759 5048 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/268d907d-1730-456e-8e52-67e58aca607b-kubelet-dir\") on node \"crc\" DevicePath \"\"" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.557826 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/268d907d-1730-456e-8e52-67e58aca607b-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "268d907d-1730-456e-8e52-67e58aca607b" (UID: "268d907d-1730-456e-8e52-67e58aca607b"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:33:54 crc kubenswrapper[5048]: E1213 06:33:54.557892 5048 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.251:6443: connect: connection refused" interval="400ms" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.580727 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Dec 13 06:33:54 crc kubenswrapper[5048]: E1213 06:33:54.591940 5048 log.go:32] "RunPodSandbox from runtime service failed" err=< Dec 13 06:33:54 crc kubenswrapper[5048]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_controller-manager-5d89566846-prjbf_openshift-controller-manager_27b96e48-77ff-4f71-a919-9f30814704a7_0(0591050fb0096128ab6183b570a68d42cbf222c7a6435162587203b9ff0798a7): error adding pod openshift-controller-manager_controller-manager-5d89566846-prjbf to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"0591050fb0096128ab6183b570a68d42cbf222c7a6435162587203b9ff0798a7" Netns:"/var/run/netns/42b24614-cf65-4500-848d-c869a50c5bab" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-controller-manager;K8S_POD_NAME=controller-manager-5d89566846-prjbf;K8S_POD_INFRA_CONTAINER_ID=0591050fb0096128ab6183b570a68d42cbf222c7a6435162587203b9ff0798a7;K8S_POD_UID=27b96e48-77ff-4f71-a919-9f30814704a7" Path:"" ERRORED: error configuring pod [openshift-controller-manager/controller-manager-5d89566846-prjbf] networking: Multus: [openshift-controller-manager/controller-manager-5d89566846-prjbf/27b96e48-77ff-4f71-a919-9f30814704a7]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod controller-manager-5d89566846-prjbf in out of cluster comm: SetNetworkStatus: failed to update the pod controller-manager-5d89566846-prjbf in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager/pods/controller-manager-5d89566846-prjbf?timeout=1m0s": dial tcp 38.102.83.251:6443: connect: connection refused Dec 13 06:33:54 crc kubenswrapper[5048]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Dec 13 06:33:54 crc kubenswrapper[5048]: > Dec 13 06:33:54 crc kubenswrapper[5048]: E1213 06:33:54.592102 5048 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err=< Dec 13 06:33:54 crc kubenswrapper[5048]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_controller-manager-5d89566846-prjbf_openshift-controller-manager_27b96e48-77ff-4f71-a919-9f30814704a7_0(0591050fb0096128ab6183b570a68d42cbf222c7a6435162587203b9ff0798a7): error adding pod openshift-controller-manager_controller-manager-5d89566846-prjbf to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 
'ContainerID:"0591050fb0096128ab6183b570a68d42cbf222c7a6435162587203b9ff0798a7" Netns:"/var/run/netns/42b24614-cf65-4500-848d-c869a50c5bab" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-controller-manager;K8S_POD_NAME=controller-manager-5d89566846-prjbf;K8S_POD_INFRA_CONTAINER_ID=0591050fb0096128ab6183b570a68d42cbf222c7a6435162587203b9ff0798a7;K8S_POD_UID=27b96e48-77ff-4f71-a919-9f30814704a7" Path:"" ERRORED: error configuring pod [openshift-controller-manager/controller-manager-5d89566846-prjbf] networking: Multus: [openshift-controller-manager/controller-manager-5d89566846-prjbf/27b96e48-77ff-4f71-a919-9f30814704a7]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod controller-manager-5d89566846-prjbf in out of cluster comm: SetNetworkStatus: failed to update the pod controller-manager-5d89566846-prjbf in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager/pods/controller-manager-5d89566846-prjbf?timeout=1m0s": dial tcp 38.102.83.251:6443: connect: connection refused Dec 13 06:33:54 crc kubenswrapper[5048]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Dec 13 06:33:54 crc kubenswrapper[5048]: > pod="openshift-controller-manager/controller-manager-5d89566846-prjbf" Dec 13 06:33:54 crc kubenswrapper[5048]: E1213 06:33:54.592176 5048 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err=< Dec 13 06:33:54 crc kubenswrapper[5048]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_controller-manager-5d89566846-prjbf_openshift-controller-manager_27b96e48-77ff-4f71-a919-9f30814704a7_0(0591050fb0096128ab6183b570a68d42cbf222c7a6435162587203b9ff0798a7): error adding pod openshift-controller-manager_controller-manager-5d89566846-prjbf to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"0591050fb0096128ab6183b570a68d42cbf222c7a6435162587203b9ff0798a7" Netns:"/var/run/netns/42b24614-cf65-4500-848d-c869a50c5bab" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-controller-manager;K8S_POD_NAME=controller-manager-5d89566846-prjbf;K8S_POD_INFRA_CONTAINER_ID=0591050fb0096128ab6183b570a68d42cbf222c7a6435162587203b9ff0798a7;K8S_POD_UID=27b96e48-77ff-4f71-a919-9f30814704a7" Path:"" ERRORED: error configuring pod [openshift-controller-manager/controller-manager-5d89566846-prjbf] networking: Multus: [openshift-controller-manager/controller-manager-5d89566846-prjbf/27b96e48-77ff-4f71-a919-9f30814704a7]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod controller-manager-5d89566846-prjbf in out of cluster comm: SetNetworkStatus: failed to update the pod controller-manager-5d89566846-prjbf in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager/pods/controller-manager-5d89566846-prjbf?timeout=1m0s": dial tcp 38.102.83.251:6443: connect: connection refused Dec 13 06:33:54 crc kubenswrapper[5048]: ': StdinData: 
{"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Dec 13 06:33:54 crc kubenswrapper[5048]: > pod="openshift-controller-manager/controller-manager-5d89566846-prjbf" Dec 13 06:33:54 crc kubenswrapper[5048]: E1213 06:33:54.592302 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"controller-manager-5d89566846-prjbf_openshift-controller-manager(27b96e48-77ff-4f71-a919-9f30814704a7)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"controller-manager-5d89566846-prjbf_openshift-controller-manager(27b96e48-77ff-4f71-a919-9f30814704a7)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_controller-manager-5d89566846-prjbf_openshift-controller-manager_27b96e48-77ff-4f71-a919-9f30814704a7_0(0591050fb0096128ab6183b570a68d42cbf222c7a6435162587203b9ff0798a7): error adding pod openshift-controller-manager_controller-manager-5d89566846-prjbf to CNI network \\\"multus-cni-network\\\": plugin type=\\\"multus-shim\\\" name=\\\"multus-cni-network\\\" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:\\\"0591050fb0096128ab6183b570a68d42cbf222c7a6435162587203b9ff0798a7\\\" Netns:\\\"/var/run/netns/42b24614-cf65-4500-848d-c869a50c5bab\\\" IfName:\\\"eth0\\\" Args:\\\"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-controller-manager;K8S_POD_NAME=controller-manager-5d89566846-prjbf;K8S_POD_INFRA_CONTAINER_ID=0591050fb0096128ab6183b570a68d42cbf222c7a6435162587203b9ff0798a7;K8S_POD_UID=27b96e48-77ff-4f71-a919-9f30814704a7\\\" Path:\\\"\\\" ERRORED: error configuring pod [openshift-controller-manager/controller-manager-5d89566846-prjbf] networking: Multus: [openshift-controller-manager/controller-manager-5d89566846-prjbf/27b96e48-77ff-4f71-a919-9f30814704a7]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod controller-manager-5d89566846-prjbf in out of cluster comm: SetNetworkStatus: failed to update the pod controller-manager-5d89566846-prjbf in out of cluster comm: status update failed for pod /: Get \\\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager/pods/controller-manager-5d89566846-prjbf?timeout=1m0s\\\": dial tcp 38.102.83.251:6443: connect: connection refused\\n': StdinData: {\\\"binDir\\\":\\\"/var/lib/cni/bin\\\",\\\"clusterNetwork\\\":\\\"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf\\\",\\\"cniVersion\\\":\\\"0.3.1\\\",\\\"daemonSocketDir\\\":\\\"/run/multus/socket\\\",\\\"globalNamespaces\\\":\\\"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv\\\",\\\"logLevel\\\":\\\"verbose\\\",\\\"logToStderr\\\":true,\\\"name\\\":\\\"multus-cni-network\\\",\\\"namespaceIsolation\\\":true,\\\"type\\\":\\\"multus-shim\\\"}\"" pod="openshift-controller-manager/controller-manager-5d89566846-prjbf" podUID="27b96e48-77ff-4f71-a919-9f30814704a7" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.649058 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/268d907d-1730-456e-8e52-67e58aca607b-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.889712 5048 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"ec2ea63841d8c109979272ff3cf5a612c26bdc7ccb07ce882d831437b9a65117"} Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.891420 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5d89566846-prjbf" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.891510 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.891507 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"268d907d-1730-456e-8e52-67e58aca607b","Type":"ContainerDied","Data":"273cb1e4ab78e778a25e8c556d3664a544cce9de01598c9bd0215f8c39ce448b"} Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.891574 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="273cb1e4ab78e778a25e8c556d3664a544cce9de01598c9bd0215f8c39ce448b" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.891871 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5d89566846-prjbf" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.895934 5048 status_manager.go:851] "Failed to get status for pod" podUID="268d907d-1730-456e-8e52-67e58aca607b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:54 crc kubenswrapper[5048]: I1213 06:33:54.896101 5048 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:54 crc kubenswrapper[5048]: E1213 06:33:54.959906 5048 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.251:6443: connect: connection refused" interval="800ms" Dec 13 06:33:55 crc kubenswrapper[5048]: E1213 06:33:55.341247 5048 log.go:32] "RunPodSandbox from runtime service failed" err=< Dec 13 06:33:55 crc kubenswrapper[5048]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_controller-manager-5d89566846-prjbf_openshift-controller-manager_27b96e48-77ff-4f71-a919-9f30814704a7_0(9da399d5261131c3b4d17927d7b1c2acb07bac2ccc00879bb0eb9d1bca524971): error adding pod openshift-controller-manager_controller-manager-5d89566846-prjbf to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"9da399d5261131c3b4d17927d7b1c2acb07bac2ccc00879bb0eb9d1bca524971" Netns:"/var/run/netns/9401e657-53b8-4d20-815c-041158187d39" IfName:"eth0" 
Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-controller-manager;K8S_POD_NAME=controller-manager-5d89566846-prjbf;K8S_POD_INFRA_CONTAINER_ID=9da399d5261131c3b4d17927d7b1c2acb07bac2ccc00879bb0eb9d1bca524971;K8S_POD_UID=27b96e48-77ff-4f71-a919-9f30814704a7" Path:"" ERRORED: error configuring pod [openshift-controller-manager/controller-manager-5d89566846-prjbf] networking: Multus: [openshift-controller-manager/controller-manager-5d89566846-prjbf/27b96e48-77ff-4f71-a919-9f30814704a7]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod controller-manager-5d89566846-prjbf in out of cluster comm: SetNetworkStatus: failed to update the pod controller-manager-5d89566846-prjbf in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager/pods/controller-manager-5d89566846-prjbf?timeout=1m0s": dial tcp 38.102.83.251:6443: connect: connection refused Dec 13 06:33:55 crc kubenswrapper[5048]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Dec 13 06:33:55 crc kubenswrapper[5048]: > Dec 13 06:33:55 crc kubenswrapper[5048]: E1213 06:33:55.341472 5048 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err=< Dec 13 06:33:55 crc kubenswrapper[5048]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_controller-manager-5d89566846-prjbf_openshift-controller-manager_27b96e48-77ff-4f71-a919-9f30814704a7_0(9da399d5261131c3b4d17927d7b1c2acb07bac2ccc00879bb0eb9d1bca524971): error adding pod openshift-controller-manager_controller-manager-5d89566846-prjbf to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"9da399d5261131c3b4d17927d7b1c2acb07bac2ccc00879bb0eb9d1bca524971" Netns:"/var/run/netns/9401e657-53b8-4d20-815c-041158187d39" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-controller-manager;K8S_POD_NAME=controller-manager-5d89566846-prjbf;K8S_POD_INFRA_CONTAINER_ID=9da399d5261131c3b4d17927d7b1c2acb07bac2ccc00879bb0eb9d1bca524971;K8S_POD_UID=27b96e48-77ff-4f71-a919-9f30814704a7" Path:"" ERRORED: error configuring pod [openshift-controller-manager/controller-manager-5d89566846-prjbf] networking: Multus: [openshift-controller-manager/controller-manager-5d89566846-prjbf/27b96e48-77ff-4f71-a919-9f30814704a7]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod controller-manager-5d89566846-prjbf in out of cluster comm: SetNetworkStatus: failed to update the pod controller-manager-5d89566846-prjbf in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager/pods/controller-manager-5d89566846-prjbf?timeout=1m0s": dial tcp 38.102.83.251:6443: connect: connection refused Dec 13 06:33:55 crc kubenswrapper[5048]: ': StdinData: 
{"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Dec 13 06:33:55 crc kubenswrapper[5048]: > pod="openshift-controller-manager/controller-manager-5d89566846-prjbf" Dec 13 06:33:55 crc kubenswrapper[5048]: E1213 06:33:55.341496 5048 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err=< Dec 13 06:33:55 crc kubenswrapper[5048]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_controller-manager-5d89566846-prjbf_openshift-controller-manager_27b96e48-77ff-4f71-a919-9f30814704a7_0(9da399d5261131c3b4d17927d7b1c2acb07bac2ccc00879bb0eb9d1bca524971): error adding pod openshift-controller-manager_controller-manager-5d89566846-prjbf to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"9da399d5261131c3b4d17927d7b1c2acb07bac2ccc00879bb0eb9d1bca524971" Netns:"/var/run/netns/9401e657-53b8-4d20-815c-041158187d39" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-controller-manager;K8S_POD_NAME=controller-manager-5d89566846-prjbf;K8S_POD_INFRA_CONTAINER_ID=9da399d5261131c3b4d17927d7b1c2acb07bac2ccc00879bb0eb9d1bca524971;K8S_POD_UID=27b96e48-77ff-4f71-a919-9f30814704a7" Path:"" ERRORED: error configuring pod [openshift-controller-manager/controller-manager-5d89566846-prjbf] networking: Multus: [openshift-controller-manager/controller-manager-5d89566846-prjbf/27b96e48-77ff-4f71-a919-9f30814704a7]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod controller-manager-5d89566846-prjbf in out of cluster comm: SetNetworkStatus: failed to update the pod controller-manager-5d89566846-prjbf in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager/pods/controller-manager-5d89566846-prjbf?timeout=1m0s": dial tcp 38.102.83.251:6443: connect: connection refused Dec 13 06:33:55 crc kubenswrapper[5048]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Dec 13 06:33:55 crc kubenswrapper[5048]: > pod="openshift-controller-manager/controller-manager-5d89566846-prjbf" Dec 13 06:33:55 crc kubenswrapper[5048]: E1213 06:33:55.341548 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"controller-manager-5d89566846-prjbf_openshift-controller-manager(27b96e48-77ff-4f71-a919-9f30814704a7)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"controller-manager-5d89566846-prjbf_openshift-controller-manager(27b96e48-77ff-4f71-a919-9f30814704a7)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_controller-manager-5d89566846-prjbf_openshift-controller-manager_27b96e48-77ff-4f71-a919-9f30814704a7_0(9da399d5261131c3b4d17927d7b1c2acb07bac2ccc00879bb0eb9d1bca524971): error adding pod 
openshift-controller-manager_controller-manager-5d89566846-prjbf to CNI network \\\"multus-cni-network\\\": plugin type=\\\"multus-shim\\\" name=\\\"multus-cni-network\\\" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:\\\"9da399d5261131c3b4d17927d7b1c2acb07bac2ccc00879bb0eb9d1bca524971\\\" Netns:\\\"/var/run/netns/9401e657-53b8-4d20-815c-041158187d39\\\" IfName:\\\"eth0\\\" Args:\\\"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-controller-manager;K8S_POD_NAME=controller-manager-5d89566846-prjbf;K8S_POD_INFRA_CONTAINER_ID=9da399d5261131c3b4d17927d7b1c2acb07bac2ccc00879bb0eb9d1bca524971;K8S_POD_UID=27b96e48-77ff-4f71-a919-9f30814704a7\\\" Path:\\\"\\\" ERRORED: error configuring pod [openshift-controller-manager/controller-manager-5d89566846-prjbf] networking: Multus: [openshift-controller-manager/controller-manager-5d89566846-prjbf/27b96e48-77ff-4f71-a919-9f30814704a7]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod controller-manager-5d89566846-prjbf in out of cluster comm: SetNetworkStatus: failed to update the pod controller-manager-5d89566846-prjbf in out of cluster comm: status update failed for pod /: Get \\\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager/pods/controller-manager-5d89566846-prjbf?timeout=1m0s\\\": dial tcp 38.102.83.251:6443: connect: connection refused\\n': StdinData: {\\\"binDir\\\":\\\"/var/lib/cni/bin\\\",\\\"clusterNetwork\\\":\\\"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf\\\",\\\"cniVersion\\\":\\\"0.3.1\\\",\\\"daemonSocketDir\\\":\\\"/run/multus/socket\\\",\\\"globalNamespaces\\\":\\\"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv\\\",\\\"logLevel\\\":\\\"verbose\\\",\\\"logToStderr\\\":true,\\\"name\\\":\\\"multus-cni-network\\\",\\\"namespaceIsolation\\\":true,\\\"type\\\":\\\"multus-shim\\\"}\"" pod="openshift-controller-manager/controller-manager-5d89566846-prjbf" podUID="27b96e48-77ff-4f71-a919-9f30814704a7" Dec 13 06:33:55 crc kubenswrapper[5048]: E1213 06:33:55.761181 5048 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.251:6443: connect: connection refused" interval="1.6s" Dec 13 06:33:55 crc kubenswrapper[5048]: I1213 06:33:55.925256 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q8ltl" event={"ID":"f7b93b87-31d6-4279-8ac9-b834417f66d9","Type":"ContainerStarted","Data":"52be2c819fe02667e0cc38e2034d8a8b56a31eab506c70292646b1b3778792b3"} Dec 13 06:33:55 crc kubenswrapper[5048]: I1213 06:33:55.936158 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l9l5s" event={"ID":"6885bcbf-dd86-4e5a-bd21-92395c5ae676","Type":"ContainerStarted","Data":"529c6aca62b97569504fa9b10f92e3cdb545be671e8f66b4e1e1a46d4a06a2ef"} Dec 13 06:33:55 crc kubenswrapper[5048]: I1213 06:33:55.937702 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" event={"ID":"8c1ef8c9-54d7-4182-bd65-cdbf97bb6284","Type":"ContainerStarted","Data":"c2c1ed6442aea77d542f2e2c81ac18e1c0bfef3d208cb59d4043fa0b12360374"} Dec 13 06:33:55 crc kubenswrapper[5048]: I1213 06:33:55.941202 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lqvmp" 
event={"ID":"57717114-1abd-46bf-bdbd-0a785d734cd3","Type":"ContainerStarted","Data":"c4396335eb2a97d22649564234649656acd25d404188ffced2ce13d449d9a3e3"} Dec 13 06:33:55 crc kubenswrapper[5048]: I1213 06:33:55.942877 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vfwpt" event={"ID":"07531c82-87d1-409f-9c5a-4910633b5786","Type":"ContainerStarted","Data":"a336c19a0f63b8b85867ec6ddf871f2da98da00c3c33b9fd43e4aaadf3f4e36d"} Dec 13 06:33:55 crc kubenswrapper[5048]: I1213 06:33:55.945865 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rb7fl" event={"ID":"d5fb554d-84e3-4bf0-857f-a64da6e6a36f","Type":"ContainerStarted","Data":"666da3c6871801788c05855016c1604b42cdffd0215f079b556bc97f48647603"} Dec 13 06:33:55 crc kubenswrapper[5048]: I1213 06:33:55.951099 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hjls5" event={"ID":"1ea76a75-e8de-4a91-89af-726df36e8a21","Type":"ContainerStarted","Data":"506e65738f12dec546347db63eb4ad7d6dae4e3caa91938d658c64bc57e8ef87"} Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.579582 5048 status_manager.go:851] "Failed to get status for pod" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-route-controller-manager/pods/route-controller-manager-74dc84cd6c-9769d\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.579821 5048 status_manager.go:851] "Failed to get status for pod" podUID="268d907d-1730-456e-8e52-67e58aca607b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.580030 5048 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.580235 5048 status_manager.go:851] "Failed to get status for pod" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-zm7qx\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.960149 5048 generic.go:334] "Generic (PLEG): container finished" podID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" containerID="666da3c6871801788c05855016c1604b42cdffd0215f079b556bc97f48647603" exitCode=0 Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.960211 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rb7fl" event={"ID":"d5fb554d-84e3-4bf0-857f-a64da6e6a36f","Type":"ContainerDied","Data":"666da3c6871801788c05855016c1604b42cdffd0215f079b556bc97f48647603"} Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.961056 5048 status_manager.go:851] "Failed to get status for pod" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" 
pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-route-controller-manager/pods/route-controller-manager-74dc84cd6c-9769d\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.961474 5048 status_manager.go:851] "Failed to get status for pod" podUID="268d907d-1730-456e-8e52-67e58aca607b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.961684 5048 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.962080 5048 status_manager.go:851] "Failed to get status for pod" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-zm7qx\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.963393 5048 status_manager.go:851] "Failed to get status for pod" podUID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" pod="openshift-marketplace/redhat-marketplace-rb7fl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-rb7fl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.963571 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-zm7qx_60f5ae10-2f86-46f8-b613-f017b8753690/marketplace-operator/0.log" Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.963623 5048 generic.go:334] "Generic (PLEG): container finished" podID="60f5ae10-2f86-46f8-b613-f017b8753690" containerID="727f275f8ea7690142a7908237965bf28c2bde5da65b72e422d26634fbdaa0bb" exitCode=1 Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.963738 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" event={"ID":"60f5ae10-2f86-46f8-b613-f017b8753690","Type":"ContainerDied","Data":"727f275f8ea7690142a7908237965bf28c2bde5da65b72e422d26634fbdaa0bb"} Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.964497 5048 scope.go:117] "RemoveContainer" containerID="727f275f8ea7690142a7908237965bf28c2bde5da65b72e422d26634fbdaa0bb" Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.964558 5048 status_manager.go:851] "Failed to get status for pod" podUID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" pod="openshift-marketplace/redhat-marketplace-rb7fl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-rb7fl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.964907 5048 status_manager.go:851] "Failed to get status for pod" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" 
pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-route-controller-manager/pods/route-controller-manager-74dc84cd6c-9769d\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.965195 5048 status_manager.go:851] "Failed to get status for pod" podUID="268d907d-1730-456e-8e52-67e58aca607b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.965408 5048 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.965853 5048 status_manager.go:851] "Failed to get status for pod" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-zm7qx\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.966309 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"98b93aef31307e077f3cd0616fadc36c2475728e96423a52f5b253b522a06c94"} Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.966353 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-q8ltl" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9" containerName="registry-server" containerID="cri-o://52be2c819fe02667e0cc38e2034d8a8b56a31eab506c70292646b1b3778792b3" gracePeriod=30 Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.966522 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-lqvmp" podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" containerName="registry-server" containerID="cri-o://c4396335eb2a97d22649564234649656acd25d404188ffced2ce13d449d9a3e3" gracePeriod=30 Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.966754 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-hjls5" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" containerName="registry-server" containerID="cri-o://506e65738f12dec546347db63eb4ad7d6dae4e3caa91938d658c64bc57e8ef87" gracePeriod=30 Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.967053 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-l9l5s" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" containerName="registry-server" containerID="cri-o://529c6aca62b97569504fa9b10f92e3cdb545be671e8f66b4e1e1a46d4a06a2ef" gracePeriod=30 Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.967130 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" Dec 13 
Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.967176 5048 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.967426 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-vfwpt" podUID="07531c82-87d1-409f-9c5a-4910633b5786" containerName="registry-server" containerID="cri-o://a336c19a0f63b8b85867ec6ddf871f2da98da00c3c33b9fd43e4aaadf3f4e36d" gracePeriod=30
Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.969148 5048 status_manager.go:851] "Failed to get status for pod" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" pod="openshift-marketplace/certified-operators-hjls5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-hjls5\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.969750 5048 status_manager.go:851] "Failed to get status for pod" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-zm7qx\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.970058 5048 status_manager.go:851] "Failed to get status for pod" podUID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" pod="openshift-marketplace/redhat-marketplace-rb7fl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-rb7fl\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.970291 5048 status_manager.go:851] "Failed to get status for pod" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9" pod="openshift-marketplace/community-operators-q8ltl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q8ltl\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.970584 5048 status_manager.go:851] "Failed to get status for pod" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-route-controller-manager/pods/route-controller-manager-74dc84cd6c-9769d\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.970868 5048 status_manager.go:851] "Failed to get status for pod" podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" pod="openshift-marketplace/certified-operators-lqvmp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lqvmp\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.971108 5048 status_manager.go:851] "Failed to get status for pod" podUID="268d907d-1730-456e-8e52-67e58aca607b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.971503 5048 status_manager.go:851] "Failed to get status for pod" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-route-controller-manager/pods/route-controller-manager-74dc84cd6c-9769d\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.971814 5048 status_manager.go:851] "Failed to get status for pod" podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" pod="openshift-marketplace/certified-operators-lqvmp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lqvmp\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.972032 5048 status_manager.go:851] "Failed to get status for pod" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" pod="openshift-marketplace/redhat-operators-l9l5s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-l9l5s\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.972380 5048 status_manager.go:851] "Failed to get status for pod" podUID="268d907d-1730-456e-8e52-67e58aca607b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.972648 5048 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.972859 5048 status_manager.go:851] "Failed to get status for pod" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" pod="openshift-marketplace/certified-operators-hjls5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-hjls5\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.973086 5048 status_manager.go:851] "Failed to get status for pod" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-zm7qx\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.978289 5048 status_manager.go:851] "Failed to get status for pod" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9" pod="openshift-marketplace/community-operators-q8ltl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q8ltl\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.978581 5048 status_manager.go:851] "Failed to get status for pod" podUID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" pod="openshift-marketplace/redhat-marketplace-rb7fl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-rb7fl\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:33:56 crc kubenswrapper[5048]: I1213 06:33:56.978790 5048 status_manager.go:851] "Failed to get status for pod" podUID="07531c82-87d1-409f-9c5a-4910633b5786" pod="openshift-marketplace/community-operators-vfwpt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-vfwpt\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:33:57 crc kubenswrapper[5048]: I1213 06:33:57.269225 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rb7fl"
Dec 13 06:33:57 crc kubenswrapper[5048]: I1213 06:33:57.272885 5048 status_manager.go:851] "Failed to get status for pod" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-route-controller-manager/pods/route-controller-manager-74dc84cd6c-9769d\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:33:57 crc kubenswrapper[5048]: I1213 06:33:57.273201 5048 status_manager.go:851] "Failed to get status for pod" podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" pod="openshift-marketplace/certified-operators-lqvmp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lqvmp\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:33:57 crc kubenswrapper[5048]: I1213 06:33:57.273511 5048 status_manager.go:851] "Failed to get status for pod" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" pod="openshift-marketplace/redhat-operators-l9l5s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-l9l5s\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:33:57 crc kubenswrapper[5048]: I1213 06:33:57.273699 5048 status_manager.go:851] "Failed to get status for pod" podUID="268d907d-1730-456e-8e52-67e58aca607b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:33:57 crc kubenswrapper[5048]: I1213 06:33:57.273941 5048 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:33:57 crc kubenswrapper[5048]: I1213 06:33:57.274133 5048 status_manager.go:851] "Failed to get status for pod" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-zm7qx\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:33:57 crc kubenswrapper[5048]: I1213 06:33:57.274496 5048 status_manager.go:851] "Failed to get status for pod" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" pod="openshift-marketplace/certified-operators-hjls5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-hjls5\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:33:57 crc kubenswrapper[5048]: I1213 06:33:57.274684 5048 status_manager.go:851] "Failed to get status for pod" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9" pod="openshift-marketplace/community-operators-q8ltl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q8ltl\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:33:57 crc kubenswrapper[5048]: I1213 06:33:57.274945 5048 status_manager.go:851] "Failed to get status for pod" podUID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" pod="openshift-marketplace/redhat-marketplace-rb7fl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-rb7fl\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:33:57 crc kubenswrapper[5048]: I1213 06:33:57.275344 5048 status_manager.go:851] "Failed to get status for pod" podUID="07531c82-87d1-409f-9c5a-4910633b5786" pod="openshift-marketplace/community-operators-vfwpt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-vfwpt\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:33:57 crc kubenswrapper[5048]: E1213 06:33:57.364009 5048 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.251:6443: connect: connection refused" interval="3.2s"
Dec 13 06:33:57 crc kubenswrapper[5048]: I1213 06:33:57.394623 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kbc9l\" (UniqueName: \"kubernetes.io/projected/d5fb554d-84e3-4bf0-857f-a64da6e6a36f-kube-api-access-kbc9l\") pod \"d5fb554d-84e3-4bf0-857f-a64da6e6a36f\" (UID: \"d5fb554d-84e3-4bf0-857f-a64da6e6a36f\") "
Dec 13 06:33:57 crc kubenswrapper[5048]: I1213 06:33:57.394681 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d5fb554d-84e3-4bf0-857f-a64da6e6a36f-catalog-content\") pod \"d5fb554d-84e3-4bf0-857f-a64da6e6a36f\" (UID: \"d5fb554d-84e3-4bf0-857f-a64da6e6a36f\") "
Dec 13 06:33:57 crc kubenswrapper[5048]: I1213 06:33:57.394730 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d5fb554d-84e3-4bf0-857f-a64da6e6a36f-utilities\") pod \"d5fb554d-84e3-4bf0-857f-a64da6e6a36f\" (UID: \"d5fb554d-84e3-4bf0-857f-a64da6e6a36f\") "
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:33:57 crc kubenswrapper[5048]: I1213 06:33:57.408880 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d5fb554d-84e3-4bf0-857f-a64da6e6a36f-kube-api-access-kbc9l" (OuterVolumeSpecName: "kube-api-access-kbc9l") pod "d5fb554d-84e3-4bf0-857f-a64da6e6a36f" (UID: "d5fb554d-84e3-4bf0-857f-a64da6e6a36f"). InnerVolumeSpecName "kube-api-access-kbc9l". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:33:57 crc kubenswrapper[5048]: I1213 06:33:57.435101 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d5fb554d-84e3-4bf0-857f-a64da6e6a36f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d5fb554d-84e3-4bf0-857f-a64da6e6a36f" (UID: "d5fb554d-84e3-4bf0-857f-a64da6e6a36f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:33:57 crc kubenswrapper[5048]: I1213 06:33:57.496376 5048 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d5fb554d-84e3-4bf0-857f-a64da6e6a36f-utilities\") on node \"crc\" DevicePath \"\"" Dec 13 06:33:57 crc kubenswrapper[5048]: I1213 06:33:57.496470 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kbc9l\" (UniqueName: \"kubernetes.io/projected/d5fb554d-84e3-4bf0-857f-a64da6e6a36f-kube-api-access-kbc9l\") on node \"crc\" DevicePath \"\"" Dec 13 06:33:57 crc kubenswrapper[5048]: I1213 06:33:57.496484 5048 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d5fb554d-84e3-4bf0-857f-a64da6e6a36f-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 13 06:33:57 crc kubenswrapper[5048]: I1213 06:33:57.966929 5048 patch_prober.go:28] interesting pod/route-controller-manager-74dc84cd6c-9769d container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.60:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 13 06:33:57 crc kubenswrapper[5048]: I1213 06:33:57.967004 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.60:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Dec 13 06:33:57 crc kubenswrapper[5048]: I1213 06:33:57.974543 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rb7fl" Dec 13 06:33:57 crc kubenswrapper[5048]: I1213 06:33:57.975522 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rb7fl" event={"ID":"d5fb554d-84e3-4bf0-857f-a64da6e6a36f","Type":"ContainerDied","Data":"838fb14a68a00c979122dd607a09c8c6b8d24e909176a2aacb01a247003a23fa"} Dec 13 06:33:57 crc kubenswrapper[5048]: I1213 06:33:57.975593 5048 scope.go:117] "RemoveContainer" containerID="666da3c6871801788c05855016c1604b42cdffd0215f079b556bc97f48647603" Dec 13 06:33:57 crc kubenswrapper[5048]: I1213 06:33:57.976128 5048 status_manager.go:851] "Failed to get status for pod" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-route-controller-manager/pods/route-controller-manager-74dc84cd6c-9769d\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:57 crc kubenswrapper[5048]: I1213 06:33:57.976453 5048 status_manager.go:851] "Failed to get status for pod" podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" pod="openshift-marketplace/certified-operators-lqvmp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lqvmp\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:57 crc kubenswrapper[5048]: I1213 06:33:57.976880 5048 status_manager.go:851] "Failed to get status for pod" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" pod="openshift-marketplace/redhat-operators-l9l5s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-l9l5s\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:57 crc kubenswrapper[5048]: I1213 06:33:57.977641 5048 status_manager.go:851] "Failed to get status for pod" podUID="268d907d-1730-456e-8e52-67e58aca607b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:57 crc kubenswrapper[5048]: I1213 06:33:57.978004 5048 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:57 crc kubenswrapper[5048]: I1213 06:33:57.978616 5048 status_manager.go:851] "Failed to get status for pod" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" pod="openshift-marketplace/certified-operators-hjls5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-hjls5\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:57 crc kubenswrapper[5048]: I1213 06:33:57.978840 5048 status_manager.go:851] "Failed to get status for pod" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-zm7qx\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:57 crc kubenswrapper[5048]: I1213 06:33:57.979002 5048 status_manager.go:851] "Failed to get 
status for pod" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9" pod="openshift-marketplace/community-operators-q8ltl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q8ltl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:57 crc kubenswrapper[5048]: I1213 06:33:57.979149 5048 status_manager.go:851] "Failed to get status for pod" podUID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" pod="openshift-marketplace/redhat-marketplace-rb7fl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-rb7fl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:57 crc kubenswrapper[5048]: I1213 06:33:57.979300 5048 status_manager.go:851] "Failed to get status for pod" podUID="07531c82-87d1-409f-9c5a-4910633b5786" pod="openshift-marketplace/community-operators-vfwpt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-vfwpt\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:57 crc kubenswrapper[5048]: I1213 06:33:57.997905 5048 scope.go:117] "RemoveContainer" containerID="4cc2b400c561cf80f3bb04bd290206deb73d14dcd8dd41468ca7e5159f5101e5" Dec 13 06:33:58 crc kubenswrapper[5048]: I1213 06:33:58.008549 5048 status_manager.go:851] "Failed to get status for pod" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-route-controller-manager/pods/route-controller-manager-74dc84cd6c-9769d\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:58 crc kubenswrapper[5048]: I1213 06:33:58.009267 5048 status_manager.go:851] "Failed to get status for pod" podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" pod="openshift-marketplace/certified-operators-lqvmp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lqvmp\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:58 crc kubenswrapper[5048]: I1213 06:33:58.009695 5048 status_manager.go:851] "Failed to get status for pod" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" pod="openshift-marketplace/redhat-operators-l9l5s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-l9l5s\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:58 crc kubenswrapper[5048]: I1213 06:33:58.010136 5048 status_manager.go:851] "Failed to get status for pod" podUID="268d907d-1730-456e-8e52-67e58aca607b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:58 crc kubenswrapper[5048]: I1213 06:33:58.010419 5048 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:58 crc kubenswrapper[5048]: I1213 06:33:58.010786 5048 status_manager.go:851] "Failed to get status for pod" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" pod="openshift-marketplace/certified-operators-hjls5" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-hjls5\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:58 crc kubenswrapper[5048]: I1213 06:33:58.011060 5048 status_manager.go:851] "Failed to get status for pod" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-zm7qx\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:58 crc kubenswrapper[5048]: I1213 06:33:58.011384 5048 status_manager.go:851] "Failed to get status for pod" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9" pod="openshift-marketplace/community-operators-q8ltl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q8ltl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:58 crc kubenswrapper[5048]: I1213 06:33:58.011764 5048 status_manager.go:851] "Failed to get status for pod" podUID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" pod="openshift-marketplace/redhat-marketplace-rb7fl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-rb7fl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:58 crc kubenswrapper[5048]: I1213 06:33:58.012208 5048 status_manager.go:851] "Failed to get status for pod" podUID="07531c82-87d1-409f-9c5a-4910633b5786" pod="openshift-marketplace/community-operators-vfwpt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-vfwpt\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:33:58 crc kubenswrapper[5048]: E1213 06:33:58.347249 5048 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-route-controller-manager/events\": dial tcp 38.102.83.251:6443: connect: connection refused" event="&Event{ObjectMeta:{route-controller-manager-74dc84cd6c-9769d.1880b2c98131a568 openshift-route-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-route-controller-manager,Name:route-controller-manager-74dc84cd6c-9769d,UID:8c1ef8c9-54d7-4182-bd65-cdbf97bb6284,APIVersion:v1,ResourceVersion:29733,FieldPath:spec.containers{route-controller-manager},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-13 06:33:52.454919528 +0000 UTC m=+266.321514109,LastTimestamp:2025-12-13 06:33:52.454919528 +0000 UTC m=+266.321514109,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Dec 13 06:33:58 crc kubenswrapper[5048]: I1213 06:33:58.976184 5048 patch_prober.go:28] interesting pod/route-controller-manager-74dc84cd6c-9769d container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.60:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 13 06:33:58 crc kubenswrapper[5048]: I1213 06:33:58.976604 5048 prober.go:107] 
"Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.60:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Dec 13 06:33:59 crc kubenswrapper[5048]: I1213 06:33:59.986300 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-l9l5s_6885bcbf-dd86-4e5a-bd21-92395c5ae676/registry-server/0.log" Dec 13 06:33:59 crc kubenswrapper[5048]: I1213 06:33:59.987307 5048 generic.go:334] "Generic (PLEG): container finished" podID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" containerID="529c6aca62b97569504fa9b10f92e3cdb545be671e8f66b4e1e1a46d4a06a2ef" exitCode=1 Dec 13 06:33:59 crc kubenswrapper[5048]: I1213 06:33:59.987499 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l9l5s" event={"ID":"6885bcbf-dd86-4e5a-bd21-92395c5ae676","Type":"ContainerDied","Data":"529c6aca62b97569504fa9b10f92e3cdb545be671e8f66b4e1e1a46d4a06a2ef"} Dec 13 06:33:59 crc kubenswrapper[5048]: I1213 06:33:59.989116 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-lqvmp_57717114-1abd-46bf-bdbd-0a785d734cd3/registry-server/0.log" Dec 13 06:33:59 crc kubenswrapper[5048]: I1213 06:33:59.989809 5048 generic.go:334] "Generic (PLEG): container finished" podID="57717114-1abd-46bf-bdbd-0a785d734cd3" containerID="c4396335eb2a97d22649564234649656acd25d404188ffced2ce13d449d9a3e3" exitCode=1 Dec 13 06:33:59 crc kubenswrapper[5048]: I1213 06:33:59.989831 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lqvmp" event={"ID":"57717114-1abd-46bf-bdbd-0a785d734cd3","Type":"ContainerDied","Data":"c4396335eb2a97d22649564234649656acd25d404188ffced2ce13d449d9a3e3"} Dec 13 06:33:59 crc kubenswrapper[5048]: I1213 06:33:59.991971 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-hjls5_1ea76a75-e8de-4a91-89af-726df36e8a21/registry-server/0.log" Dec 13 06:33:59 crc kubenswrapper[5048]: I1213 06:33:59.992952 5048 generic.go:334] "Generic (PLEG): container finished" podID="1ea76a75-e8de-4a91-89af-726df36e8a21" containerID="506e65738f12dec546347db63eb4ad7d6dae4e3caa91938d658c64bc57e8ef87" exitCode=1 Dec 13 06:33:59 crc kubenswrapper[5048]: I1213 06:33:59.993024 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hjls5" event={"ID":"1ea76a75-e8de-4a91-89af-726df36e8a21","Type":"ContainerDied","Data":"506e65738f12dec546347db63eb4ad7d6dae4e3caa91938d658c64bc57e8ef87"} Dec 13 06:33:59 crc kubenswrapper[5048]: I1213 06:33:59.994957 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-vfwpt_07531c82-87d1-409f-9c5a-4910633b5786/registry-server/0.log" Dec 13 06:33:59 crc kubenswrapper[5048]: I1213 06:33:59.996045 5048 generic.go:334] "Generic (PLEG): container finished" podID="07531c82-87d1-409f-9c5a-4910633b5786" containerID="a336c19a0f63b8b85867ec6ddf871f2da98da00c3c33b9fd43e4aaadf3f4e36d" exitCode=1 Dec 13 06:33:59 crc kubenswrapper[5048]: I1213 06:33:59.996106 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vfwpt" 
event={"ID":"07531c82-87d1-409f-9c5a-4910633b5786","Type":"ContainerDied","Data":"a336c19a0f63b8b85867ec6ddf871f2da98da00c3c33b9fd43e4aaadf3f4e36d"} Dec 13 06:33:59 crc kubenswrapper[5048]: I1213 06:33:59.998041 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-zm7qx_60f5ae10-2f86-46f8-b613-f017b8753690/marketplace-operator/0.log" Dec 13 06:33:59 crc kubenswrapper[5048]: I1213 06:33:59.998103 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" event={"ID":"60f5ae10-2f86-46f8-b613-f017b8753690","Type":"ContainerStarted","Data":"46a3ec48c5fc8e47ed316f37e3e09ed748fd8df9bd7a24dbe03af86e70a4c351"} Dec 13 06:33:59 crc kubenswrapper[5048]: I1213 06:33:59.998681 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" Dec 13 06:33:59 crc kubenswrapper[5048]: I1213 06:33:59.999223 5048 status_manager.go:851] "Failed to get status for pod" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-route-controller-manager/pods/route-controller-manager-74dc84cd6c-9769d\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:33:59.999982 5048 status_manager.go:851] "Failed to get status for pod" podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" pod="openshift-marketplace/certified-operators-lqvmp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lqvmp\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.000150 5048 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-zm7qx container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.61:8080/healthz\": dial tcp 10.217.0.61:8080: connect: connection refused" start-of-body= Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.000204 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.61:8080/healthz\": dial tcp 10.217.0.61:8080: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.000319 5048 status_manager.go:851] "Failed to get status for pod" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" pod="openshift-marketplace/redhat-operators-l9l5s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-l9l5s\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.000552 5048 status_manager.go:851] "Failed to get status for pod" podUID="268d907d-1730-456e-8e52-67e58aca607b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.000639 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-q8ltl_f7b93b87-31d6-4279-8ac9-b834417f66d9/registry-server/0.log" Dec 13 
06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.000911 5048 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.001298 5048 status_manager.go:851] "Failed to get status for pod" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" pod="openshift-marketplace/certified-operators-hjls5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-hjls5\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.001319 5048 generic.go:334] "Generic (PLEG): container finished" podID="f7b93b87-31d6-4279-8ac9-b834417f66d9" containerID="52be2c819fe02667e0cc38e2034d8a8b56a31eab506c70292646b1b3778792b3" exitCode=1 Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.001342 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q8ltl" event={"ID":"f7b93b87-31d6-4279-8ac9-b834417f66d9","Type":"ContainerDied","Data":"52be2c819fe02667e0cc38e2034d8a8b56a31eab506c70292646b1b3778792b3"} Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.001522 5048 status_manager.go:851] "Failed to get status for pod" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-zm7qx\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.001725 5048 status_manager.go:851] "Failed to get status for pod" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9" pod="openshift-marketplace/community-operators-q8ltl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q8ltl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.001952 5048 status_manager.go:851] "Failed to get status for pod" podUID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" pod="openshift-marketplace/redhat-marketplace-rb7fl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-rb7fl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.002212 5048 status_manager.go:851] "Failed to get status for pod" podUID="07531c82-87d1-409f-9c5a-4910633b5786" pod="openshift-marketplace/community-operators-vfwpt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-vfwpt\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: E1213 06:34:00.565773 5048 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.251:6443: connect: connection refused" interval="6.4s" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.790712 5048 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_certified-operators-lqvmp_57717114-1abd-46bf-bdbd-0a785d734cd3/registry-server/0.log" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.791822 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lqvmp" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.792422 5048 status_manager.go:851] "Failed to get status for pod" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-route-controller-manager/pods/route-controller-manager-74dc84cd6c-9769d\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.792797 5048 status_manager.go:851] "Failed to get status for pod" podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" pod="openshift-marketplace/certified-operators-lqvmp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lqvmp\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.793285 5048 status_manager.go:851] "Failed to get status for pod" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" pod="openshift-marketplace/redhat-operators-l9l5s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-l9l5s\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.793554 5048 status_manager.go:851] "Failed to get status for pod" podUID="268d907d-1730-456e-8e52-67e58aca607b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.793841 5048 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.794186 5048 status_manager.go:851] "Failed to get status for pod" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" pod="openshift-marketplace/certified-operators-hjls5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-hjls5\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.794470 5048 status_manager.go:851] "Failed to get status for pod" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-zm7qx\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.794749 5048 status_manager.go:851] "Failed to get status for pod" podUID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" pod="openshift-marketplace/redhat-marketplace-rb7fl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-rb7fl\": dial tcp 
38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.795074 5048 status_manager.go:851] "Failed to get status for pod" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9" pod="openshift-marketplace/community-operators-q8ltl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q8ltl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.795534 5048 status_manager.go:851] "Failed to get status for pod" podUID="07531c82-87d1-409f-9c5a-4910633b5786" pod="openshift-marketplace/community-operators-vfwpt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-vfwpt\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.847298 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57717114-1abd-46bf-bdbd-0a785d734cd3-catalog-content\") pod \"57717114-1abd-46bf-bdbd-0a785d734cd3\" (UID: \"57717114-1abd-46bf-bdbd-0a785d734cd3\") " Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.847861 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57717114-1abd-46bf-bdbd-0a785d734cd3-utilities\") pod \"57717114-1abd-46bf-bdbd-0a785d734cd3\" (UID: \"57717114-1abd-46bf-bdbd-0a785d734cd3\") " Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.847905 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cksfn\" (UniqueName: \"kubernetes.io/projected/57717114-1abd-46bf-bdbd-0a785d734cd3-kube-api-access-cksfn\") pod \"57717114-1abd-46bf-bdbd-0a785d734cd3\" (UID: \"57717114-1abd-46bf-bdbd-0a785d734cd3\") " Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.848734 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57717114-1abd-46bf-bdbd-0a785d734cd3-utilities" (OuterVolumeSpecName: "utilities") pod "57717114-1abd-46bf-bdbd-0a785d734cd3" (UID: "57717114-1abd-46bf-bdbd-0a785d734cd3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.849206 5048 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57717114-1abd-46bf-bdbd-0a785d734cd3-utilities\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.856430 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57717114-1abd-46bf-bdbd-0a785d734cd3-kube-api-access-cksfn" (OuterVolumeSpecName: "kube-api-access-cksfn") pod "57717114-1abd-46bf-bdbd-0a785d734cd3" (UID: "57717114-1abd-46bf-bdbd-0a785d734cd3"). InnerVolumeSpecName "kube-api-access-cksfn". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.898303 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-hjls5_1ea76a75-e8de-4a91-89af-726df36e8a21/registry-server/0.log" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.900034 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-hjls5" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.900730 5048 status_manager.go:851] "Failed to get status for pod" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-route-controller-manager/pods/route-controller-manager-74dc84cd6c-9769d\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.901635 5048 status_manager.go:851] "Failed to get status for pod" podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" pod="openshift-marketplace/certified-operators-lqvmp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lqvmp\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.901879 5048 status_manager.go:851] "Failed to get status for pod" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" pod="openshift-marketplace/redhat-operators-l9l5s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-l9l5s\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.902073 5048 status_manager.go:851] "Failed to get status for pod" podUID="268d907d-1730-456e-8e52-67e58aca607b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.902248 5048 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.902511 5048 status_manager.go:851] "Failed to get status for pod" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" pod="openshift-marketplace/certified-operators-hjls5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-hjls5\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.902782 5048 status_manager.go:851] "Failed to get status for pod" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-zm7qx\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.902965 5048 status_manager.go:851] "Failed to get status for pod" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9" pod="openshift-marketplace/community-operators-q8ltl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q8ltl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.903123 5048 status_manager.go:851] "Failed to get status for pod" podUID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" 
pod="openshift-marketplace/redhat-marketplace-rb7fl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-rb7fl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.903273 5048 status_manager.go:851] "Failed to get status for pod" podUID="07531c82-87d1-409f-9c5a-4910633b5786" pod="openshift-marketplace/community-operators-vfwpt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-vfwpt\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.912614 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-q8ltl" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.928526 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57717114-1abd-46bf-bdbd-0a785d734cd3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57717114-1abd-46bf-bdbd-0a785d734cd3" (UID: "57717114-1abd-46bf-bdbd-0a785d734cd3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.940763 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-vfwpt_07531c82-87d1-409f-9c5a-4910633b5786/registry-server/0.log" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.941765 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vfwpt" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.942355 5048 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.942778 5048 status_manager.go:851] "Failed to get status for pod" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" pod="openshift-marketplace/certified-operators-hjls5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-hjls5\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.943000 5048 status_manager.go:851] "Failed to get status for pod" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-zm7qx\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.943217 5048 status_manager.go:851] "Failed to get status for pod" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9" pod="openshift-marketplace/community-operators-q8ltl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q8ltl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.943460 5048 status_manager.go:851] "Failed to get status for pod" podUID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" pod="openshift-marketplace/redhat-marketplace-rb7fl" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-rb7fl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.943681 5048 status_manager.go:851] "Failed to get status for pod" podUID="07531c82-87d1-409f-9c5a-4910633b5786" pod="openshift-marketplace/community-operators-vfwpt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-vfwpt\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.943903 5048 status_manager.go:851] "Failed to get status for pod" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-route-controller-manager/pods/route-controller-manager-74dc84cd6c-9769d\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.944136 5048 status_manager.go:851] "Failed to get status for pod" podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" pod="openshift-marketplace/certified-operators-lqvmp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lqvmp\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.944352 5048 status_manager.go:851] "Failed to get status for pod" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" pod="openshift-marketplace/redhat-operators-l9l5s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-l9l5s\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.944593 5048 status_manager.go:851] "Failed to get status for pod" podUID="268d907d-1730-456e-8e52-67e58aca607b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.950344 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cwtbg\" (UniqueName: \"kubernetes.io/projected/1ea76a75-e8de-4a91-89af-726df36e8a21-kube-api-access-cwtbg\") pod \"1ea76a75-e8de-4a91-89af-726df36e8a21\" (UID: \"1ea76a75-e8de-4a91-89af-726df36e8a21\") " Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.950407 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1ea76a75-e8de-4a91-89af-726df36e8a21-catalog-content\") pod \"1ea76a75-e8de-4a91-89af-726df36e8a21\" (UID: \"1ea76a75-e8de-4a91-89af-726df36e8a21\") " Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.950538 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1ea76a75-e8de-4a91-89af-726df36e8a21-utilities\") pod \"1ea76a75-e8de-4a91-89af-726df36e8a21\" (UID: \"1ea76a75-e8de-4a91-89af-726df36e8a21\") " Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.950825 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cksfn\" (UniqueName: \"kubernetes.io/projected/57717114-1abd-46bf-bdbd-0a785d734cd3-kube-api-access-cksfn\") on node \"crc\" DevicePath \"\"" Dec 13 
06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.950845 5048 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57717114-1abd-46bf-bdbd-0a785d734cd3-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.951604 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1ea76a75-e8de-4a91-89af-726df36e8a21-utilities" (OuterVolumeSpecName: "utilities") pod "1ea76a75-e8de-4a91-89af-726df36e8a21" (UID: "1ea76a75-e8de-4a91-89af-726df36e8a21"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:34:00 crc kubenswrapper[5048]: I1213 06:34:00.958573 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ea76a75-e8de-4a91-89af-726df36e8a21-kube-api-access-cwtbg" (OuterVolumeSpecName: "kube-api-access-cwtbg") pod "1ea76a75-e8de-4a91-89af-726df36e8a21" (UID: "1ea76a75-e8de-4a91-89af-726df36e8a21"). InnerVolumeSpecName "kube-api-access-cwtbg". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.001344 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1ea76a75-e8de-4a91-89af-726df36e8a21-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1ea76a75-e8de-4a91-89af-726df36e8a21" (UID: "1ea76a75-e8de-4a91-89af-726df36e8a21"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.018323 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-zm7qx_60f5ae10-2f86-46f8-b613-f017b8753690/marketplace-operator/1.log" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.019205 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-zm7qx_60f5ae10-2f86-46f8-b613-f017b8753690/marketplace-operator/0.log" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.019263 5048 generic.go:334] "Generic (PLEG): container finished" podID="60f5ae10-2f86-46f8-b613-f017b8753690" containerID="46a3ec48c5fc8e47ed316f37e3e09ed748fd8df9bd7a24dbe03af86e70a4c351" exitCode=1 Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.019318 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" event={"ID":"60f5ae10-2f86-46f8-b613-f017b8753690","Type":"ContainerDied","Data":"46a3ec48c5fc8e47ed316f37e3e09ed748fd8df9bd7a24dbe03af86e70a4c351"} Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.019353 5048 scope.go:117] "RemoveContainer" containerID="727f275f8ea7690142a7908237965bf28c2bde5da65b72e422d26634fbdaa0bb" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.019830 5048 scope.go:117] "RemoveContainer" containerID="46a3ec48c5fc8e47ed316f37e3e09ed748fd8df9bd7a24dbe03af86e70a4c351" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.020069 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-l9l5s_6885bcbf-dd86-4e5a-bd21-92395c5ae676/registry-server/0.log" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.020048 5048 status_manager.go:851] "Failed to get status for pod" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-route-controller-manager/pods/route-controller-manager-74dc84cd6c-9769d\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: E1213 06:34:01.020326 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-zm7qx_openshift-marketplace(60f5ae10-2f86-46f8-b613-f017b8753690)\"" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.020463 5048 status_manager.go:851] "Failed to get status for pod" podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" pod="openshift-marketplace/certified-operators-lqvmp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lqvmp\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.020737 5048 status_manager.go:851] "Failed to get status for pod" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" pod="openshift-marketplace/redhat-operators-l9l5s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-l9l5s\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.021267 5048 status_manager.go:851] "Failed to get status for pod" podUID="268d907d-1730-456e-8e52-67e58aca607b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.021612 5048 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.021842 5048 status_manager.go:851] "Failed to get status for pod" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" pod="openshift-marketplace/certified-operators-hjls5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-hjls5\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.022262 5048 status_manager.go:851] "Failed to get status for pod" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-zm7qx\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.022511 5048 status_manager.go:851] "Failed to get status for pod" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9" pod="openshift-marketplace/community-operators-q8ltl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q8ltl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.022819 
5048 status_manager.go:851] "Failed to get status for pod" podUID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" pod="openshift-marketplace/redhat-marketplace-rb7fl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-rb7fl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.023134 5048 status_manager.go:851] "Failed to get status for pod" podUID="07531c82-87d1-409f-9c5a-4910633b5786" pod="openshift-marketplace/community-operators-vfwpt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-vfwpt\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.023402 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-l9l5s" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.023892 5048 status_manager.go:851] "Failed to get status for pod" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9" pod="openshift-marketplace/community-operators-q8ltl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q8ltl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.024240 5048 status_manager.go:851] "Failed to get status for pod" podUID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" pod="openshift-marketplace/redhat-marketplace-rb7fl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-rb7fl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.024603 5048 status_manager.go:851] "Failed to get status for pod" podUID="07531c82-87d1-409f-9c5a-4910633b5786" pod="openshift-marketplace/community-operators-vfwpt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-vfwpt\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.024780 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-lqvmp_57717114-1abd-46bf-bdbd-0a785d734cd3/registry-server/0.log" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.025164 5048 status_manager.go:851] "Failed to get status for pod" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-route-controller-manager/pods/route-controller-manager-74dc84cd6c-9769d\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.025459 5048 status_manager.go:851] "Failed to get status for pod" podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" pod="openshift-marketplace/certified-operators-lqvmp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lqvmp\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.025767 5048 status_manager.go:851] "Failed to get status for pod" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" pod="openshift-marketplace/redhat-operators-l9l5s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-l9l5s\": dial tcp 
38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.026052 5048 status_manager.go:851] "Failed to get status for pod" podUID="268d907d-1730-456e-8e52-67e58aca607b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.026311 5048 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.026535 5048 status_manager.go:851] "Failed to get status for pod" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" pod="openshift-marketplace/certified-operators-hjls5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-hjls5\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.026598 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lqvmp" event={"ID":"57717114-1abd-46bf-bdbd-0a785d734cd3","Type":"ContainerDied","Data":"44c223a9dc9877cdb14998758b183038769841a2039d90d77d127efb13283922"} Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.026722 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lqvmp" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.026960 5048 status_manager.go:851] "Failed to get status for pod" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-zm7qx\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.027543 5048 status_manager.go:851] "Failed to get status for pod" podUID="07531c82-87d1-409f-9c5a-4910633b5786" pod="openshift-marketplace/community-operators-vfwpt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-vfwpt\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.028313 5048 status_manager.go:851] "Failed to get status for pod" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-route-controller-manager/pods/route-controller-manager-74dc84cd6c-9769d\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.028703 5048 status_manager.go:851] "Failed to get status for pod" podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" pod="openshift-marketplace/certified-operators-lqvmp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lqvmp\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.029262 5048 status_manager.go:851] "Failed to get status 
for pod" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" pod="openshift-marketplace/redhat-operators-l9l5s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-l9l5s\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.029409 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-hjls5_1ea76a75-e8de-4a91-89af-726df36e8a21/registry-server/0.log" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.029808 5048 status_manager.go:851] "Failed to get status for pod" podUID="268d907d-1730-456e-8e52-67e58aca607b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.030107 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hjls5" event={"ID":"1ea76a75-e8de-4a91-89af-726df36e8a21","Type":"ContainerDied","Data":"05c6f9bdf50bb7f4fe15c659050af614a77a95db583e634c7c4e0c09c80b1e11"} Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.030188 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hjls5" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.030261 5048 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.030665 5048 status_manager.go:851] "Failed to get status for pod" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-zm7qx\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.030969 5048 status_manager.go:851] "Failed to get status for pod" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" pod="openshift-marketplace/certified-operators-hjls5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-hjls5\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.031287 5048 status_manager.go:851] "Failed to get status for pod" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9" pod="openshift-marketplace/community-operators-q8ltl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q8ltl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.031682 5048 status_manager.go:851] "Failed to get status for pod" podUID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" pod="openshift-marketplace/redhat-marketplace-rb7fl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-rb7fl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.032135 5048 status_manager.go:851] "Failed to get 
status for pod" podUID="268d907d-1730-456e-8e52-67e58aca607b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.032487 5048 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.032833 5048 status_manager.go:851] "Failed to get status for pod" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" pod="openshift-marketplace/certified-operators-hjls5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-hjls5\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.033282 5048 status_manager.go:851] "Failed to get status for pod" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-zm7qx\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.033542 5048 status_manager.go:851] "Failed to get status for pod" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9" pod="openshift-marketplace/community-operators-q8ltl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q8ltl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.033725 5048 status_manager.go:851] "Failed to get status for pod" podUID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" pod="openshift-marketplace/redhat-marketplace-rb7fl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-rb7fl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.033890 5048 status_manager.go:851] "Failed to get status for pod" podUID="07531c82-87d1-409f-9c5a-4910633b5786" pod="openshift-marketplace/community-operators-vfwpt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-vfwpt\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.034134 5048 status_manager.go:851] "Failed to get status for pod" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-route-controller-manager/pods/route-controller-manager-74dc84cd6c-9769d\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.034240 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-vfwpt_07531c82-87d1-409f-9c5a-4910633b5786/registry-server/0.log" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.034421 5048 status_manager.go:851] "Failed to get status for pod" 
podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" pod="openshift-marketplace/certified-operators-lqvmp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lqvmp\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.034685 5048 status_manager.go:851] "Failed to get status for pod" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" pod="openshift-marketplace/redhat-operators-l9l5s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-l9l5s\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.035884 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vfwpt" event={"ID":"07531c82-87d1-409f-9c5a-4910633b5786","Type":"ContainerDied","Data":"355278c264fb8041d9179753b9904b27876a78e7ef8d5bca3fa592911de20691"} Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.035916 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vfwpt" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.036526 5048 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.036886 5048 status_manager.go:851] "Failed to get status for pod" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" pod="openshift-marketplace/certified-operators-hjls5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-hjls5\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.037482 5048 status_manager.go:851] "Failed to get status for pod" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-zm7qx\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.037750 5048 status_manager.go:851] "Failed to get status for pod" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9" pod="openshift-marketplace/community-operators-q8ltl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q8ltl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.038023 5048 status_manager.go:851] "Failed to get status for pod" podUID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" pod="openshift-marketplace/redhat-marketplace-rb7fl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-rb7fl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.038335 5048 status_manager.go:851] "Failed to get status for pod" podUID="07531c82-87d1-409f-9c5a-4910633b5786" pod="openshift-marketplace/community-operators-vfwpt" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-vfwpt\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.038588 5048 status_manager.go:851] "Failed to get status for pod" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-route-controller-manager/pods/route-controller-manager-74dc84cd6c-9769d\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.038797 5048 status_manager.go:851] "Failed to get status for pod" podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" pod="openshift-marketplace/certified-operators-lqvmp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lqvmp\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.039034 5048 status_manager.go:851] "Failed to get status for pod" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" pod="openshift-marketplace/redhat-operators-l9l5s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-l9l5s\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.039246 5048 status_manager.go:851] "Failed to get status for pod" podUID="268d907d-1730-456e-8e52-67e58aca607b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.051489 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6885bcbf-dd86-4e5a-bd21-92395c5ae676-catalog-content\") pod \"6885bcbf-dd86-4e5a-bd21-92395c5ae676\" (UID: \"6885bcbf-dd86-4e5a-bd21-92395c5ae676\") " Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.051533 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r8r72\" (UniqueName: \"kubernetes.io/projected/07531c82-87d1-409f-9c5a-4910633b5786-kube-api-access-r8r72\") pod \"07531c82-87d1-409f-9c5a-4910633b5786\" (UID: \"07531c82-87d1-409f-9c5a-4910633b5786\") " Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.051552 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vvqcq\" (UniqueName: \"kubernetes.io/projected/6885bcbf-dd86-4e5a-bd21-92395c5ae676-kube-api-access-vvqcq\") pod \"6885bcbf-dd86-4e5a-bd21-92395c5ae676\" (UID: \"6885bcbf-dd86-4e5a-bd21-92395c5ae676\") " Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.051624 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07531c82-87d1-409f-9c5a-4910633b5786-utilities\") pod \"07531c82-87d1-409f-9c5a-4910633b5786\" (UID: \"07531c82-87d1-409f-9c5a-4910633b5786\") " Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.052305 5048 status_manager.go:851] "Failed to get status for pod" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-route-controller-manager/pods/route-controller-manager-74dc84cd6c-9769d\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.052332 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6885bcbf-dd86-4e5a-bd21-92395c5ae676-utilities\") pod \"6885bcbf-dd86-4e5a-bd21-92395c5ae676\" (UID: \"6885bcbf-dd86-4e5a-bd21-92395c5ae676\") " Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.052546 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07531c82-87d1-409f-9c5a-4910633b5786-catalog-content\") pod \"07531c82-87d1-409f-9c5a-4910633b5786\" (UID: \"07531c82-87d1-409f-9c5a-4910633b5786\") " Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.052567 5048 status_manager.go:851] "Failed to get status for pod" podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" pod="openshift-marketplace/certified-operators-lqvmp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lqvmp\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.052877 5048 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1ea76a75-e8de-4a91-89af-726df36e8a21-utilities\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.052897 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cwtbg\" (UniqueName: \"kubernetes.io/projected/1ea76a75-e8de-4a91-89af-726df36e8a21-kube-api-access-cwtbg\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.052908 5048 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1ea76a75-e8de-4a91-89af-726df36e8a21-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.052881 5048 status_manager.go:851] "Failed to get status for pod" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" pod="openshift-marketplace/redhat-operators-l9l5s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-l9l5s\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.053359 5048 status_manager.go:851] "Failed to get status for pod" podUID="268d907d-1730-456e-8e52-67e58aca607b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.053722 5048 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.053915 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07531c82-87d1-409f-9c5a-4910633b5786-utilities" (OuterVolumeSpecName: "utilities") pod 
"07531c82-87d1-409f-9c5a-4910633b5786" (UID: "07531c82-87d1-409f-9c5a-4910633b5786"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.054039 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6885bcbf-dd86-4e5a-bd21-92395c5ae676-utilities" (OuterVolumeSpecName: "utilities") pod "6885bcbf-dd86-4e5a-bd21-92395c5ae676" (UID: "6885bcbf-dd86-4e5a-bd21-92395c5ae676"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.054205 5048 status_manager.go:851] "Failed to get status for pod" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" pod="openshift-marketplace/certified-operators-hjls5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-hjls5\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.054597 5048 status_manager.go:851] "Failed to get status for pod" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-zm7qx\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.054948 5048 status_manager.go:851] "Failed to get status for pod" podUID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" pod="openshift-marketplace/redhat-marketplace-rb7fl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-rb7fl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.055209 5048 status_manager.go:851] "Failed to get status for pod" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9" pod="openshift-marketplace/community-operators-q8ltl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q8ltl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.056357 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07531c82-87d1-409f-9c5a-4910633b5786-kube-api-access-r8r72" (OuterVolumeSpecName: "kube-api-access-r8r72") pod "07531c82-87d1-409f-9c5a-4910633b5786" (UID: "07531c82-87d1-409f-9c5a-4910633b5786"). InnerVolumeSpecName "kube-api-access-r8r72". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.056383 5048 status_manager.go:851] "Failed to get status for pod" podUID="07531c82-87d1-409f-9c5a-4910633b5786" pod="openshift-marketplace/community-operators-vfwpt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-vfwpt\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.056875 5048 status_manager.go:851] "Failed to get status for pod" podUID="07531c82-87d1-409f-9c5a-4910633b5786" pod="openshift-marketplace/community-operators-vfwpt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-vfwpt\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.057073 5048 status_manager.go:851] "Failed to get status for pod" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-route-controller-manager/pods/route-controller-manager-74dc84cd6c-9769d\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.057341 5048 status_manager.go:851] "Failed to get status for pod" podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" pod="openshift-marketplace/certified-operators-lqvmp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lqvmp\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.057969 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-q8ltl_f7b93b87-31d6-4279-8ac9-b834417f66d9/registry-server/0.log" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.057975 5048 status_manager.go:851] "Failed to get status for pod" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" pod="openshift-marketplace/redhat-operators-l9l5s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-l9l5s\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.058394 5048 status_manager.go:851] "Failed to get status for pod" podUID="268d907d-1730-456e-8e52-67e58aca607b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.058870 5048 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.058910 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-q8ltl" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.059134 5048 status_manager.go:851] "Failed to get status for pod" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-zm7qx\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.059361 5048 status_manager.go:851] "Failed to get status for pod" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" pod="openshift-marketplace/certified-operators-hjls5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-hjls5\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.059586 5048 status_manager.go:851] "Failed to get status for pod" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9" pod="openshift-marketplace/community-operators-q8ltl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q8ltl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.059773 5048 status_manager.go:851] "Failed to get status for pod" podUID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" pod="openshift-marketplace/redhat-marketplace-rb7fl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-rb7fl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.060302 5048 status_manager.go:851] "Failed to get status for pod" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-route-controller-manager/pods/route-controller-manager-74dc84cd6c-9769d\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.061711 5048 status_manager.go:851] "Failed to get status for pod" podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" pod="openshift-marketplace/certified-operators-lqvmp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lqvmp\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.062097 5048 status_manager.go:851] "Failed to get status for pod" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" pod="openshift-marketplace/redhat-operators-l9l5s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-l9l5s\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.062382 5048 status_manager.go:851] "Failed to get status for pod" podUID="268d907d-1730-456e-8e52-67e58aca607b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.062815 5048 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" 
err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.063211 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6885bcbf-dd86-4e5a-bd21-92395c5ae676-kube-api-access-vvqcq" (OuterVolumeSpecName: "kube-api-access-vvqcq") pod "6885bcbf-dd86-4e5a-bd21-92395c5ae676" (UID: "6885bcbf-dd86-4e5a-bd21-92395c5ae676"). InnerVolumeSpecName "kube-api-access-vvqcq". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.063334 5048 status_manager.go:851] "Failed to get status for pod" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" pod="openshift-marketplace/certified-operators-hjls5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-hjls5\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.063606 5048 status_manager.go:851] "Failed to get status for pod" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-zm7qx\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.063836 5048 status_manager.go:851] "Failed to get status for pod" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9" pod="openshift-marketplace/community-operators-q8ltl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q8ltl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.064059 5048 status_manager.go:851] "Failed to get status for pod" podUID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" pod="openshift-marketplace/redhat-marketplace-rb7fl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-rb7fl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.064283 5048 status_manager.go:851] "Failed to get status for pod" podUID="07531c82-87d1-409f-9c5a-4910633b5786" pod="openshift-marketplace/community-operators-vfwpt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-vfwpt\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.114272 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07531c82-87d1-409f-9c5a-4910633b5786-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "07531c82-87d1-409f-9c5a-4910633b5786" (UID: "07531c82-87d1-409f-9c5a-4910633b5786"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.153888 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7b93b87-31d6-4279-8ac9-b834417f66d9-utilities\") pod \"f7b93b87-31d6-4279-8ac9-b834417f66d9\" (UID: \"f7b93b87-31d6-4279-8ac9-b834417f66d9\") " Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.153957 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7b93b87-31d6-4279-8ac9-b834417f66d9-catalog-content\") pod \"f7b93b87-31d6-4279-8ac9-b834417f66d9\" (UID: \"f7b93b87-31d6-4279-8ac9-b834417f66d9\") " Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.154143 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6bz7t\" (UniqueName: \"kubernetes.io/projected/f7b93b87-31d6-4279-8ac9-b834417f66d9-kube-api-access-6bz7t\") pod \"f7b93b87-31d6-4279-8ac9-b834417f66d9\" (UID: \"f7b93b87-31d6-4279-8ac9-b834417f66d9\") " Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.154452 5048 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6885bcbf-dd86-4e5a-bd21-92395c5ae676-utilities\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.154478 5048 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07531c82-87d1-409f-9c5a-4910633b5786-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.154494 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r8r72\" (UniqueName: \"kubernetes.io/projected/07531c82-87d1-409f-9c5a-4910633b5786-kube-api-access-r8r72\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.154507 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vvqcq\" (UniqueName: \"kubernetes.io/projected/6885bcbf-dd86-4e5a-bd21-92395c5ae676-kube-api-access-vvqcq\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.154522 5048 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07531c82-87d1-409f-9c5a-4910633b5786-utilities\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.154809 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7b93b87-31d6-4279-8ac9-b834417f66d9-utilities" (OuterVolumeSpecName: "utilities") pod "f7b93b87-31d6-4279-8ac9-b834417f66d9" (UID: "f7b93b87-31d6-4279-8ac9-b834417f66d9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.158195 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7b93b87-31d6-4279-8ac9-b834417f66d9-kube-api-access-6bz7t" (OuterVolumeSpecName: "kube-api-access-6bz7t") pod "f7b93b87-31d6-4279-8ac9-b834417f66d9" (UID: "f7b93b87-31d6-4279-8ac9-b834417f66d9"). InnerVolumeSpecName "kube-api-access-6bz7t". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.183488 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6885bcbf-dd86-4e5a-bd21-92395c5ae676-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6885bcbf-dd86-4e5a-bd21-92395c5ae676" (UID: "6885bcbf-dd86-4e5a-bd21-92395c5ae676"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.206579 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7b93b87-31d6-4279-8ac9-b834417f66d9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f7b93b87-31d6-4279-8ac9-b834417f66d9" (UID: "f7b93b87-31d6-4279-8ac9-b834417f66d9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.256270 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6bz7t\" (UniqueName: \"kubernetes.io/projected/f7b93b87-31d6-4279-8ac9-b834417f66d9-kube-api-access-6bz7t\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.256309 5048 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6885bcbf-dd86-4e5a-bd21-92395c5ae676-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.256318 5048 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7b93b87-31d6-4279-8ac9-b834417f66d9-utilities\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.256346 5048 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7b93b87-31d6-4279-8ac9-b834417f66d9-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.351831 5048 status_manager.go:851] "Failed to get status for pod" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" pod="openshift-marketplace/certified-operators-hjls5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-hjls5\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.351993 5048 status_manager.go:851] "Failed to get status for pod" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-zm7qx\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.352164 5048 status_manager.go:851] "Failed to get status for pod" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9" pod="openshift-marketplace/community-operators-q8ltl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q8ltl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.352537 5048 status_manager.go:851] "Failed to get status for pod" podUID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" pod="openshift-marketplace/redhat-marketplace-rb7fl" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-rb7fl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.353120 5048 status_manager.go:851] "Failed to get status for pod" podUID="07531c82-87d1-409f-9c5a-4910633b5786" pod="openshift-marketplace/community-operators-vfwpt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-vfwpt\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.354245 5048 status_manager.go:851] "Failed to get status for pod" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-route-controller-manager/pods/route-controller-manager-74dc84cd6c-9769d\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.354704 5048 status_manager.go:851] "Failed to get status for pod" podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" pod="openshift-marketplace/certified-operators-lqvmp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lqvmp\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.355013 5048 status_manager.go:851] "Failed to get status for pod" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" pod="openshift-marketplace/redhat-operators-l9l5s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-l9l5s\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.355273 5048 status_manager.go:851] "Failed to get status for pod" podUID="268d907d-1730-456e-8e52-67e58aca607b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:01 crc kubenswrapper[5048]: I1213 06:34:01.355570 5048 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.042485 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-q8ltl_f7b93b87-31d6-4279-8ac9-b834417f66d9/registry-server/0.log" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.044866 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q8ltl" event={"ID":"f7b93b87-31d6-4279-8ac9-b834417f66d9","Type":"ContainerDied","Data":"52a8dac9ef288ba7ee8a9bfc9bea2f347c61720ea3db5461133cd227ac9b406a"} Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.044903 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-q8ltl" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.045702 5048 status_manager.go:851] "Failed to get status for pod" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-route-controller-manager/pods/route-controller-manager-74dc84cd6c-9769d\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.045961 5048 status_manager.go:851] "Failed to get status for pod" podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" pod="openshift-marketplace/certified-operators-lqvmp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lqvmp\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.046504 5048 status_manager.go:851] "Failed to get status for pod" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" pod="openshift-marketplace/redhat-operators-l9l5s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-l9l5s\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.046584 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-l9l5s_6885bcbf-dd86-4e5a-bd21-92395c5ae676/registry-server/0.log" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.046858 5048 status_manager.go:851] "Failed to get status for pod" podUID="268d907d-1730-456e-8e52-67e58aca607b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.047317 5048 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.047508 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l9l5s" event={"ID":"6885bcbf-dd86-4e5a-bd21-92395c5ae676","Type":"ContainerDied","Data":"0d3727a8ba5f468e9ef2575be6348dad212cf60449af253ac7d6960ae044eab9"} Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.047526 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-l9l5s" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.047672 5048 status_manager.go:851] "Failed to get status for pod" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" pod="openshift-marketplace/certified-operators-hjls5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-hjls5\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.048158 5048 scope.go:117] "RemoveContainer" containerID="46a3ec48c5fc8e47ed316f37e3e09ed748fd8df9bd7a24dbe03af86e70a4c351" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.048155 5048 status_manager.go:851] "Failed to get status for pod" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-zm7qx\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: E1213 06:34:02.048510 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-zm7qx_openshift-marketplace(60f5ae10-2f86-46f8-b613-f017b8753690)\"" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.048713 5048 status_manager.go:851] "Failed to get status for pod" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9" pod="openshift-marketplace/community-operators-q8ltl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q8ltl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.048997 5048 status_manager.go:851] "Failed to get status for pod" podUID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" pod="openshift-marketplace/redhat-marketplace-rb7fl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-rb7fl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.049384 5048 status_manager.go:851] "Failed to get status for pod" podUID="07531c82-87d1-409f-9c5a-4910633b5786" pod="openshift-marketplace/community-operators-vfwpt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-vfwpt\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.049766 5048 status_manager.go:851] "Failed to get status for pod" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-route-controller-manager/pods/route-controller-manager-74dc84cd6c-9769d\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.049969 5048 status_manager.go:851] "Failed to get status for pod" podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" pod="openshift-marketplace/certified-operators-lqvmp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lqvmp\": dial tcp 
38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.050262 5048 status_manager.go:851] "Failed to get status for pod" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" pod="openshift-marketplace/redhat-operators-l9l5s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-l9l5s\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.050623 5048 status_manager.go:851] "Failed to get status for pod" podUID="268d907d-1730-456e-8e52-67e58aca607b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.050963 5048 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.051177 5048 status_manager.go:851] "Failed to get status for pod" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" pod="openshift-marketplace/certified-operators-hjls5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-hjls5\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.051425 5048 status_manager.go:851] "Failed to get status for pod" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-zm7qx\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.051743 5048 status_manager.go:851] "Failed to get status for pod" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9" pod="openshift-marketplace/community-operators-q8ltl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q8ltl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.051949 5048 status_manager.go:851] "Failed to get status for pod" podUID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" pod="openshift-marketplace/redhat-marketplace-rb7fl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-rb7fl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.052242 5048 status_manager.go:851] "Failed to get status for pod" podUID="07531c82-87d1-409f-9c5a-4910633b5786" pod="openshift-marketplace/community-operators-vfwpt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-vfwpt\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.064459 5048 status_manager.go:851] "Failed to get status for pod" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-route-controller-manager/pods/route-controller-manager-74dc84cd6c-9769d\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.064755 5048 status_manager.go:851] "Failed to get status for pod" podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" pod="openshift-marketplace/certified-operators-lqvmp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lqvmp\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.065171 5048 status_manager.go:851] "Failed to get status for pod" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" pod="openshift-marketplace/redhat-operators-l9l5s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-l9l5s\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.065641 5048 status_manager.go:851] "Failed to get status for pod" podUID="268d907d-1730-456e-8e52-67e58aca607b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.065908 5048 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.066174 5048 status_manager.go:851] "Failed to get status for pod" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" pod="openshift-marketplace/certified-operators-hjls5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-hjls5\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.066540 5048 status_manager.go:851] "Failed to get status for pod" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-zm7qx\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.066810 5048 status_manager.go:851] "Failed to get status for pod" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9" pod="openshift-marketplace/community-operators-q8ltl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q8ltl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.067075 5048 status_manager.go:851] "Failed to get status for pod" podUID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" pod="openshift-marketplace/redhat-marketplace-rb7fl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-rb7fl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.067307 5048 status_manager.go:851] "Failed to get status for pod" 
podUID="07531c82-87d1-409f-9c5a-4910633b5786" pod="openshift-marketplace/community-operators-vfwpt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-vfwpt\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.067572 5048 status_manager.go:851] "Failed to get status for pod" podUID="07531c82-87d1-409f-9c5a-4910633b5786" pod="openshift-marketplace/community-operators-vfwpt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-vfwpt\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.067722 5048 status_manager.go:851] "Failed to get status for pod" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-route-controller-manager/pods/route-controller-manager-74dc84cd6c-9769d\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.067917 5048 status_manager.go:851] "Failed to get status for pod" podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" pod="openshift-marketplace/certified-operators-lqvmp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lqvmp\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.068169 5048 status_manager.go:851] "Failed to get status for pod" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" pod="openshift-marketplace/redhat-operators-l9l5s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-l9l5s\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.068379 5048 status_manager.go:851] "Failed to get status for pod" podUID="268d907d-1730-456e-8e52-67e58aca607b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.068605 5048 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.068802 5048 status_manager.go:851] "Failed to get status for pod" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" pod="openshift-marketplace/certified-operators-hjls5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-hjls5\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.069010 5048 status_manager.go:851] "Failed to get status for pod" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-zm7qx\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 
06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.069254 5048 status_manager.go:851] "Failed to get status for pod" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9" pod="openshift-marketplace/community-operators-q8ltl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q8ltl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.069478 5048 status_manager.go:851] "Failed to get status for pod" podUID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" pod="openshift-marketplace/redhat-marketplace-rb7fl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-rb7fl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.601052 5048 scope.go:117] "RemoveContainer" containerID="c4396335eb2a97d22649564234649656acd25d404188ffced2ce13d449d9a3e3" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.617374 5048 scope.go:117] "RemoveContainer" containerID="dfd858171332a3f68676b6da6792a1c9ce21c9c0130e32d8ce97a3ca6220dbd9" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.638702 5048 scope.go:117] "RemoveContainer" containerID="6877c0777b3ec2b3c40ceb70f2d4d77c7fd493eb2ef816b54d5d7006a0b3ea2b" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.653910 5048 scope.go:117] "RemoveContainer" containerID="506e65738f12dec546347db63eb4ad7d6dae4e3caa91938d658c64bc57e8ef87" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.676026 5048 scope.go:117] "RemoveContainer" containerID="37e5ab0ae5d487c0c01e40129b68fcc8e0a8b2d9eb0b1242919febc65258425d" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.694347 5048 scope.go:117] "RemoveContainer" containerID="9d1869b8037dded4931b25b72642998e3775101982ab667e4a65d65b274e2d41" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.710269 5048 scope.go:117] "RemoveContainer" containerID="a336c19a0f63b8b85867ec6ddf871f2da98da00c3c33b9fd43e4aaadf3f4e36d" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.729531 5048 scope.go:117] "RemoveContainer" containerID="88945c341701d4be8dfd08598b74c69f11666d3afa4c7f19e8a0b7734878a207" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.745766 5048 scope.go:117] "RemoveContainer" containerID="8cf9ecd8091677719bf3d3dd416aeba117709026e6176183dc61197e6b968a38" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.765190 5048 scope.go:117] "RemoveContainer" containerID="52be2c819fe02667e0cc38e2034d8a8b56a31eab506c70292646b1b3778792b3" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.784753 5048 scope.go:117] "RemoveContainer" containerID="1fd54bc6bd95d53fe9009107043714905f8aa7fc103915bea961493ba9c4e603" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.803941 5048 scope.go:117] "RemoveContainer" containerID="34b8fba5e128eadb260a5293bac04f0217ab42c08f89733b6f1fed8b54c22b2a" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.824961 5048 scope.go:117] "RemoveContainer" containerID="529c6aca62b97569504fa9b10f92e3cdb545be671e8f66b4e1e1a46d4a06a2ef" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.841367 5048 scope.go:117] "RemoveContainer" containerID="d06f0e5a8ec6c60d1a528bfcc23ddb2a1e0ef241d70b480e2500674a86d88b89" Dec 13 06:34:02 crc kubenswrapper[5048]: I1213 06:34:02.857918 5048 scope.go:117] "RemoveContainer" containerID="ddd368b360bba86c9c78307cf7fe5707ca0649ff3e24f3b87ba8bc8f2b36fc8f" Dec 13 06:34:03 crc kubenswrapper[5048]: I1213 06:34:03.059474 5048 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-zm7qx_60f5ae10-2f86-46f8-b613-f017b8753690/marketplace-operator/1.log" Dec 13 06:34:03 crc kubenswrapper[5048]: I1213 06:34:03.683046 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-wcscw" Dec 13 06:34:03 crc kubenswrapper[5048]: I1213 06:34:03.684179 5048 status_manager.go:851] "Failed to get status for pod" podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" pod="openshift-marketplace/certified-operators-lqvmp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lqvmp\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:03 crc kubenswrapper[5048]: I1213 06:34:03.684464 5048 status_manager.go:851] "Failed to get status for pod" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" pod="openshift-marketplace/redhat-operators-l9l5s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-l9l5s\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:03 crc kubenswrapper[5048]: I1213 06:34:03.684696 5048 status_manager.go:851] "Failed to get status for pod" podUID="268d907d-1730-456e-8e52-67e58aca607b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:03 crc kubenswrapper[5048]: I1213 06:34:03.684914 5048 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:03 crc kubenswrapper[5048]: I1213 06:34:03.685137 5048 status_manager.go:851] "Failed to get status for pod" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" pod="openshift-marketplace/certified-operators-hjls5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-hjls5\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:03 crc kubenswrapper[5048]: I1213 06:34:03.685365 5048 status_manager.go:851] "Failed to get status for pod" podUID="adb2a7ef-7786-408c-9e37-2b577af9b3ac" pod="openshift-image-registry/image-registry-66df7c8f76-wcscw" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-66df7c8f76-wcscw\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:03 crc kubenswrapper[5048]: I1213 06:34:03.685656 5048 status_manager.go:851] "Failed to get status for pod" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-zm7qx\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:03 crc kubenswrapper[5048]: I1213 06:34:03.685906 5048 status_manager.go:851] "Failed to get status for pod" podUID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" pod="openshift-marketplace/redhat-marketplace-rb7fl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-rb7fl\": dial tcp 
38.102.83.251:6443: connect: connection refused" Dec 13 06:34:03 crc kubenswrapper[5048]: I1213 06:34:03.686193 5048 status_manager.go:851] "Failed to get status for pod" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9" pod="openshift-marketplace/community-operators-q8ltl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q8ltl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:03 crc kubenswrapper[5048]: I1213 06:34:03.686478 5048 status_manager.go:851] "Failed to get status for pod" podUID="07531c82-87d1-409f-9c5a-4910633b5786" pod="openshift-marketplace/community-operators-vfwpt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-vfwpt\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:03 crc kubenswrapper[5048]: I1213 06:34:03.686757 5048 status_manager.go:851] "Failed to get status for pod" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-route-controller-manager/pods/route-controller-manager-74dc84cd6c-9769d\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:03 crc kubenswrapper[5048]: E1213 06:34:03.763091 5048 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openshift-image-registry/crc-image-registry-storage: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/persistentvolumeclaims/crc-image-registry-storage\": dial tcp 38.102.83.251:6443: connect: connection refused" pod="openshift-image-registry/image-registry-66df7c8f76-wcscw" volumeName="registry-storage" Dec 13 06:34:04 crc kubenswrapper[5048]: I1213 06:34:04.759420 5048 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" Dec 13 06:34:04 crc kubenswrapper[5048]: I1213 06:34:04.761066 5048 scope.go:117] "RemoveContainer" containerID="46a3ec48c5fc8e47ed316f37e3e09ed748fd8df9bd7a24dbe03af86e70a4c351" Dec 13 06:34:04 crc kubenswrapper[5048]: E1213 06:34:04.761405 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-zm7qx_openshift-marketplace(60f5ae10-2f86-46f8-b613-f017b8753690)\"" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" Dec 13 06:34:05 crc kubenswrapper[5048]: I1213 06:34:05.424627 5048 patch_prober.go:28] interesting pod/route-controller-manager-74dc84cd6c-9769d container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.60:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 13 06:34:05 crc kubenswrapper[5048]: I1213 06:34:05.424690 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.60:8443/healthz\": net/http: request canceled while waiting for connection 
(Client.Timeout exceeded while awaiting headers)" Dec 13 06:34:05 crc kubenswrapper[5048]: I1213 06:34:05.566593 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 13 06:34:05 crc kubenswrapper[5048]: I1213 06:34:05.567349 5048 status_manager.go:851] "Failed to get status for pod" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-route-controller-manager/pods/route-controller-manager-74dc84cd6c-9769d\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:05 crc kubenswrapper[5048]: I1213 06:34:05.567943 5048 status_manager.go:851] "Failed to get status for pod" podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" pod="openshift-marketplace/certified-operators-lqvmp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lqvmp\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:05 crc kubenswrapper[5048]: I1213 06:34:05.568129 5048 status_manager.go:851] "Failed to get status for pod" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" pod="openshift-marketplace/redhat-operators-l9l5s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-l9l5s\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:05 crc kubenswrapper[5048]: I1213 06:34:05.568283 5048 status_manager.go:851] "Failed to get status for pod" podUID="268d907d-1730-456e-8e52-67e58aca607b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:05 crc kubenswrapper[5048]: I1213 06:34:05.568516 5048 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:05 crc kubenswrapper[5048]: I1213 06:34:05.568808 5048 status_manager.go:851] "Failed to get status for pod" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" pod="openshift-marketplace/certified-operators-hjls5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-hjls5\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:05 crc kubenswrapper[5048]: I1213 06:34:05.571272 5048 status_manager.go:851] "Failed to get status for pod" podUID="adb2a7ef-7786-408c-9e37-2b577af9b3ac" pod="openshift-image-registry/image-registry-66df7c8f76-wcscw" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-66df7c8f76-wcscw\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:05 crc kubenswrapper[5048]: I1213 06:34:05.571847 5048 status_manager.go:851] "Failed to get status for pod" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-zm7qx\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:05 crc 
kubenswrapper[5048]: I1213 06:34:05.572181 5048 status_manager.go:851] "Failed to get status for pod" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9" pod="openshift-marketplace/community-operators-q8ltl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q8ltl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:05 crc kubenswrapper[5048]: I1213 06:34:05.572465 5048 status_manager.go:851] "Failed to get status for pod" podUID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" pod="openshift-marketplace/redhat-marketplace-rb7fl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-rb7fl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:05 crc kubenswrapper[5048]: I1213 06:34:05.572750 5048 status_manager.go:851] "Failed to get status for pod" podUID="07531c82-87d1-409f-9c5a-4910633b5786" pod="openshift-marketplace/community-operators-vfwpt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-vfwpt\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:05 crc kubenswrapper[5048]: I1213 06:34:05.580933 5048 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="5e418ae3-9af9-445f-9b2d-c58699743512" Dec 13 06:34:05 crc kubenswrapper[5048]: I1213 06:34:05.580972 5048 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="5e418ae3-9af9-445f-9b2d-c58699743512" Dec 13 06:34:05 crc kubenswrapper[5048]: E1213 06:34:05.581393 5048 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 13 06:34:05 crc kubenswrapper[5048]: I1213 06:34:05.581974 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 13 06:34:05 crc kubenswrapper[5048]: W1213 06:34:05.599405 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-f2b5311283a5cda690f05ad349082965dece38c3a7dd13e29ccf897f79554ab1 WatchSource:0}: Error finding container f2b5311283a5cda690f05ad349082965dece38c3a7dd13e29ccf897f79554ab1: Status 404 returned error can't find the container with id f2b5311283a5cda690f05ad349082965dece38c3a7dd13e29ccf897f79554ab1 Dec 13 06:34:05 crc kubenswrapper[5048]: I1213 06:34:05.898419 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" podUID="aa6cce4c-6bc2-469b-9062-e928744616db" containerName="oauth-openshift" containerID="cri-o://59438fd1a765171cdab7887f9dcf93f2bdcb06b763d2382439cebf62accdff28" gracePeriod=15 Dec 13 06:34:06 crc kubenswrapper[5048]: I1213 06:34:06.077013 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"f2b5311283a5cda690f05ad349082965dece38c3a7dd13e29ccf897f79554ab1"} Dec 13 06:34:06 crc kubenswrapper[5048]: I1213 06:34:06.577782 5048 status_manager.go:851] "Failed to get status for pod" podUID="adb2a7ef-7786-408c-9e37-2b577af9b3ac" pod="openshift-image-registry/image-registry-66df7c8f76-wcscw" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-66df7c8f76-wcscw\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:06 crc kubenswrapper[5048]: I1213 06:34:06.578367 5048 status_manager.go:851] "Failed to get status for pod" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-zm7qx\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:06 crc kubenswrapper[5048]: I1213 06:34:06.578724 5048 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:06 crc kubenswrapper[5048]: I1213 06:34:06.579078 5048 status_manager.go:851] "Failed to get status for pod" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" pod="openshift-marketplace/certified-operators-hjls5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-hjls5\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:06 crc kubenswrapper[5048]: I1213 06:34:06.579706 5048 status_manager.go:851] "Failed to get status for pod" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9" pod="openshift-marketplace/community-operators-q8ltl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q8ltl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:06 crc kubenswrapper[5048]: I1213 06:34:06.580231 5048 status_manager.go:851] "Failed to get status for pod" podUID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" pod="openshift-marketplace/redhat-marketplace-rb7fl" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-rb7fl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:06 crc kubenswrapper[5048]: I1213 06:34:06.580832 5048 status_manager.go:851] "Failed to get status for pod" podUID="07531c82-87d1-409f-9c5a-4910633b5786" pod="openshift-marketplace/community-operators-vfwpt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-vfwpt\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:06.581594 5048 status_manager.go:851] "Failed to get status for pod" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-route-controller-manager/pods/route-controller-manager-74dc84cd6c-9769d\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:06.582120 5048 status_manager.go:851] "Failed to get status for pod" podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" pod="openshift-marketplace/certified-operators-lqvmp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lqvmp\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:06.582690 5048 status_manager.go:851] "Failed to get status for pod" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" pod="openshift-marketplace/redhat-operators-l9l5s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-l9l5s\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:06.583167 5048 status_manager.go:851] "Failed to get status for pod" podUID="268d907d-1730-456e-8e52-67e58aca607b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:06.583683 5048 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:07 crc kubenswrapper[5048]: E1213 06:34:06.968129 5048 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.251:6443: connect: connection refused" interval="7s" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.086903 5048 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="1bb3402cbfde05bce9b71f5fdae0e498692a498fd2a42a630b693c18acca26fd" exitCode=0 Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.086994 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"1bb3402cbfde05bce9b71f5fdae0e498692a498fd2a42a630b693c18acca26fd"} Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 
Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.087379 5048 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="5e418ae3-9af9-445f-9b2d-c58699743512"
Dec 13 06:34:07 crc kubenswrapper[5048]: E1213 06:34:07.087982 5048 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.087987 5048 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.088588 5048 status_manager.go:851] "Failed to get status for pod" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-zm7qx\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.089223 5048 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.089416 5048 generic.go:334] "Generic (PLEG): container finished" podID="aa6cce4c-6bc2-469b-9062-e928744616db" containerID="59438fd1a765171cdab7887f9dcf93f2bdcb06b763d2382439cebf62accdff28" exitCode=0
Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.089528 5048 status_manager.go:851] "Failed to get status for pod" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" pod="openshift-marketplace/certified-operators-hjls5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-hjls5\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.089539 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" event={"ID":"aa6cce4c-6bc2-469b-9062-e928744616db","Type":"ContainerDied","Data":"59438fd1a765171cdab7887f9dcf93f2bdcb06b763d2382439cebf62accdff28"}
Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.089937 5048 status_manager.go:851] "Failed to get status for pod" podUID="adb2a7ef-7786-408c-9e37-2b577af9b3ac" pod="openshift-image-registry/image-registry-66df7c8f76-wcscw" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-66df7c8f76-wcscw\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.090315 5048 status_manager.go:851] "Failed to get status for pod" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9" pod="openshift-marketplace/community-operators-q8ltl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q8ltl\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.090851 5048 status_manager.go:851] "Failed to get status for pod" podUID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" pod="openshift-marketplace/redhat-marketplace-rb7fl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-rb7fl\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.091490 5048 status_manager.go:851] "Failed to get status for pod" podUID="07531c82-87d1-409f-9c5a-4910633b5786" pod="openshift-marketplace/community-operators-vfwpt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-vfwpt\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.091762 5048 status_manager.go:851] "Failed to get status for pod" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-route-controller-manager/pods/route-controller-manager-74dc84cd6c-9769d\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.092002 5048 status_manager.go:851] "Failed to get status for pod" podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" pod="openshift-marketplace/certified-operators-lqvmp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lqvmp\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.092290 5048 status_manager.go:851] "Failed to get status for pod" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" pod="openshift-marketplace/redhat-operators-l9l5s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-l9l5s\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.092758 5048 status_manager.go:851] "Failed to get status for pod" podUID="268d907d-1730-456e-8e52-67e58aca607b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.094588 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log"
Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.094643 5048 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa" exitCode=1
Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.094673 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa"}
Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.095083 5048 scope.go:117] "RemoveContainer" containerID="3b83258f7b0d976e4828c2e09002582ab48211a67538cbed88289a15ea3118aa"
Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.095705 5048 status_manager.go:851] "Failed to get status for pod" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-route-controller-manager/pods/route-controller-manager-74dc84cd6c-9769d\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.096344 5048 status_manager.go:851] "Failed to get status for pod" podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" pod="openshift-marketplace/certified-operators-lqvmp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lqvmp\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.096954 5048 status_manager.go:851] "Failed to get status for pod" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" pod="openshift-marketplace/redhat-operators-l9l5s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-l9l5s\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.097465 5048 status_manager.go:851] "Failed to get status for pod" podUID="268d907d-1730-456e-8e52-67e58aca607b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.097690 5048 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.097965 5048 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.098356 5048 status_manager.go:851] "Failed to get status for pod" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" pod="openshift-marketplace/certified-operators-hjls5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-hjls5\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.098747 5048 status_manager.go:851] "Failed to get status for pod" podUID="adb2a7ef-7786-408c-9e37-2b577af9b3ac" pod="openshift-image-registry/image-registry-66df7c8f76-wcscw" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-66df7c8f76-wcscw\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.099128 5048 status_manager.go:851] "Failed to get status for pod" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-zm7qx\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.099587 5048 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.099921 5048 status_manager.go:851] "Failed to get status for pod" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9" pod="openshift-marketplace/community-operators-q8ltl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q8ltl\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.100287 5048 status_manager.go:851] "Failed to get status for pod" podUID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" pod="openshift-marketplace/redhat-marketplace-rb7fl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-rb7fl\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.100686 5048 status_manager.go:851] "Failed to get status for pod" podUID="07531c82-87d1-409f-9c5a-4910633b5786" pod="openshift-marketplace/community-operators-vfwpt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-vfwpt\": dial tcp 38.102.83.251:6443: connect: connection refused"
Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.405262 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m"
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.405984 5048 status_manager.go:851] "Failed to get status for pod" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-route-controller-manager/pods/route-controller-manager-74dc84cd6c-9769d\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.406250 5048 status_manager.go:851] "Failed to get status for pod" podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" pod="openshift-marketplace/certified-operators-lqvmp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-lqvmp\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.406472 5048 status_manager.go:851] "Failed to get status for pod" podUID="aa6cce4c-6bc2-469b-9062-e928744616db" pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-dwk4m\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.406672 5048 status_manager.go:851] "Failed to get status for pod" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" pod="openshift-marketplace/redhat-operators-l9l5s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-l9l5s\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.406864 5048 status_manager.go:851] "Failed to get status for pod" podUID="268d907d-1730-456e-8e52-67e58aca607b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.407058 5048 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.407256 5048 status_manager.go:851] "Failed to get status for pod" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" pod="openshift-marketplace/certified-operators-hjls5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-hjls5\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.407496 5048 status_manager.go:851] "Failed to get status for pod" podUID="adb2a7ef-7786-408c-9e37-2b577af9b3ac" pod="openshift-image-registry/image-registry-66df7c8f76-wcscw" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-66df7c8f76-wcscw\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.407691 5048 status_manager.go:851] "Failed to get status for pod" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" 
pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-zm7qx\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.407872 5048 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.408065 5048 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.408267 5048 status_manager.go:851] "Failed to get status for pod" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9" pod="openshift-marketplace/community-operators-q8ltl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q8ltl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.408652 5048 status_manager.go:851] "Failed to get status for pod" podUID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" pod="openshift-marketplace/redhat-marketplace-rb7fl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-rb7fl\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.409038 5048 status_manager.go:851] "Failed to get status for pod" podUID="07531c82-87d1-409f-9c5a-4910633b5786" pod="openshift-marketplace/community-operators-vfwpt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-vfwpt\": dial tcp 38.102.83.251:6443: connect: connection refused" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.440920 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/aa6cce4c-6bc2-469b-9062-e928744616db-audit-dir\") pod \"aa6cce4c-6bc2-469b-9062-e928744616db\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.441003 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-serving-cert\") pod \"aa6cce4c-6bc2-469b-9062-e928744616db\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.441038 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-trusted-ca-bundle\") pod \"aa6cce4c-6bc2-469b-9062-e928744616db\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.441081 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-user-template-error\") pod \"aa6cce4c-6bc2-469b-9062-e928744616db\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.441103 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-session\") pod \"aa6cce4c-6bc2-469b-9062-e928744616db\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.441025 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/aa6cce4c-6bc2-469b-9062-e928744616db-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "aa6cce4c-6bc2-469b-9062-e928744616db" (UID: "aa6cce4c-6bc2-469b-9062-e928744616db"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.441129 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-user-template-login\") pod \"aa6cce4c-6bc2-469b-9062-e928744616db\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.441342 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-ocp-branding-template\") pod \"aa6cce4c-6bc2-469b-9062-e928744616db\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.441471 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-user-template-provider-selection\") pod \"aa6cce4c-6bc2-469b-9062-e928744616db\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.441514 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-service-ca\") pod \"aa6cce4c-6bc2-469b-9062-e928744616db\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.441600 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-user-idp-0-file-data\") pod \"aa6cce4c-6bc2-469b-9062-e928744616db\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.441732 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-cliconfig\") pod \"aa6cce4c-6bc2-469b-9062-e928744616db\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.441777 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"audit-policies\" (UniqueName: \"kubernetes.io/configmap/aa6cce4c-6bc2-469b-9062-e928744616db-audit-policies\") pod \"aa6cce4c-6bc2-469b-9062-e928744616db\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.441841 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-router-certs\") pod \"aa6cce4c-6bc2-469b-9062-e928744616db\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.441901 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pw47j\" (UniqueName: \"kubernetes.io/projected/aa6cce4c-6bc2-469b-9062-e928744616db-kube-api-access-pw47j\") pod \"aa6cce4c-6bc2-469b-9062-e928744616db\" (UID: \"aa6cce4c-6bc2-469b-9062-e928744616db\") " Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.442422 5048 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/aa6cce4c-6bc2-469b-9062-e928744616db-audit-dir\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.444128 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "aa6cce4c-6bc2-469b-9062-e928744616db" (UID: "aa6cce4c-6bc2-469b-9062-e928744616db"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.444717 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "aa6cce4c-6bc2-469b-9062-e928744616db" (UID: "aa6cce4c-6bc2-469b-9062-e928744616db"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.444772 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "aa6cce4c-6bc2-469b-9062-e928744616db" (UID: "aa6cce4c-6bc2-469b-9062-e928744616db"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.447756 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa6cce4c-6bc2-469b-9062-e928744616db-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "aa6cce4c-6bc2-469b-9062-e928744616db" (UID: "aa6cce4c-6bc2-469b-9062-e928744616db"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.453481 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa6cce4c-6bc2-469b-9062-e928744616db-kube-api-access-pw47j" (OuterVolumeSpecName: "kube-api-access-pw47j") pod "aa6cce4c-6bc2-469b-9062-e928744616db" (UID: "aa6cce4c-6bc2-469b-9062-e928744616db"). InnerVolumeSpecName "kube-api-access-pw47j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.453670 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "aa6cce4c-6bc2-469b-9062-e928744616db" (UID: "aa6cce4c-6bc2-469b-9062-e928744616db"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.470753 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "aa6cce4c-6bc2-469b-9062-e928744616db" (UID: "aa6cce4c-6bc2-469b-9062-e928744616db"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.471023 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "aa6cce4c-6bc2-469b-9062-e928744616db" (UID: "aa6cce4c-6bc2-469b-9062-e928744616db"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.471309 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "aa6cce4c-6bc2-469b-9062-e928744616db" (UID: "aa6cce4c-6bc2-469b-9062-e928744616db"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.471477 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "aa6cce4c-6bc2-469b-9062-e928744616db" (UID: "aa6cce4c-6bc2-469b-9062-e928744616db"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.471843 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "aa6cce4c-6bc2-469b-9062-e928744616db" (UID: "aa6cce4c-6bc2-469b-9062-e928744616db"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.472129 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "aa6cce4c-6bc2-469b-9062-e928744616db" (UID: "aa6cce4c-6bc2-469b-9062-e928744616db"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.472355 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "aa6cce4c-6bc2-469b-9062-e928744616db" (UID: "aa6cce4c-6bc2-469b-9062-e928744616db"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.543825 5048 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/aa6cce4c-6bc2-469b-9062-e928744616db-audit-policies\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.544301 5048 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.544408 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pw47j\" (UniqueName: \"kubernetes.io/projected/aa6cce4c-6bc2-469b-9062-e928744616db-kube-api-access-pw47j\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.544535 5048 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.544637 5048 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.544717 5048 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.544805 5048 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.544901 5048 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.545005 5048 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.545103 5048 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:07 crc 
Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.545284 5048 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\""
Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.545385 5048 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/aa6cce4c-6bc2-469b-9062-e928744616db-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\""
Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.565786 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5d89566846-prjbf"
Dec 13 06:34:07 crc kubenswrapper[5048]: I1213 06:34:07.566352 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5d89566846-prjbf"
Dec 13 06:34:08 crc kubenswrapper[5048]: I1213 06:34:08.105393 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"bfcd488277778c392f1b6e7a460563b506d8063f8e1f554b4552e3b3eea3978a"}
Dec 13 06:34:08 crc kubenswrapper[5048]: I1213 06:34:08.106880 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"ac4e20e34271649e59412adaed3a0facf4c33a20738a47c793683a32ca78e797"}
Dec 13 06:34:08 crc kubenswrapper[5048]: I1213 06:34:08.107024 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"7bd0927a8053c62da6528230846f26984944a7a0ad9b5d711ce6b77f20b9dfda"}
Dec 13 06:34:08 crc kubenswrapper[5048]: I1213 06:34:08.107128 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"2bd2eadaad2b6d32cdcbd6b3a1eef011fd5adbb5ad7f78b6f1da117967adcd66"}
Dec 13 06:34:08 crc kubenswrapper[5048]: I1213 06:34:08.108841 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log"
Dec 13 06:34:08 crc kubenswrapper[5048]: I1213 06:34:08.109002 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"75909f0e75266a2a1458b4889f153b2f8e6ce961f5c10c6ff9163a5791a4a57e"}
Dec 13 06:34:08 crc kubenswrapper[5048]: I1213 06:34:08.113730 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" event={"ID":"aa6cce4c-6bc2-469b-9062-e928744616db","Type":"ContainerDied","Data":"47a0435c629513e5dd679c6384ff3ae93abdaf8547d488317e60f8a02abc7096"}
Dec 13 06:34:08 crc kubenswrapper[5048]: I1213 06:34:08.113797 5048 scope.go:117] "RemoveContainer" containerID="59438fd1a765171cdab7887f9dcf93f2bdcb06b763d2382439cebf62accdff28"
scope.go:117] "RemoveContainer" containerID="59438fd1a765171cdab7887f9dcf93f2bdcb06b763d2382439cebf62accdff28" Dec 13 06:34:08 crc kubenswrapper[5048]: I1213 06:34:08.114015 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-dwk4m" Dec 13 06:34:09 crc kubenswrapper[5048]: I1213 06:34:09.123384 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"7f15f1ccd0cb3dd8f94ea7e9e64badba92379bbfc82aaa96262a1adcbbbd227e"} Dec 13 06:34:09 crc kubenswrapper[5048]: I1213 06:34:09.123576 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 13 06:34:09 crc kubenswrapper[5048]: I1213 06:34:09.123704 5048 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="5e418ae3-9af9-445f-9b2d-c58699743512" Dec 13 06:34:09 crc kubenswrapper[5048]: I1213 06:34:09.123724 5048 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="5e418ae3-9af9-445f-9b2d-c58699743512" Dec 13 06:34:10 crc kubenswrapper[5048]: I1213 06:34:10.583090 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 13 06:34:10 crc kubenswrapper[5048]: I1213 06:34:10.583486 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 13 06:34:10 crc kubenswrapper[5048]: I1213 06:34:10.593521 5048 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Dec 13 06:34:10 crc kubenswrapper[5048]: [+]log ok Dec 13 06:34:10 crc kubenswrapper[5048]: [+]etcd ok Dec 13 06:34:10 crc kubenswrapper[5048]: [+]poststarthook/openshift.io-oauth-apiserver-reachable ok Dec 13 06:34:10 crc kubenswrapper[5048]: [+]poststarthook/start-apiserver-admission-initializer ok Dec 13 06:34:10 crc kubenswrapper[5048]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Dec 13 06:34:10 crc kubenswrapper[5048]: [+]poststarthook/openshift.io-api-request-count-filter ok Dec 13 06:34:10 crc kubenswrapper[5048]: [+]poststarthook/openshift.io-startkubeinformers ok Dec 13 06:34:10 crc kubenswrapper[5048]: [+]poststarthook/openshift.io-openshift-apiserver-reachable ok Dec 13 06:34:10 crc kubenswrapper[5048]: [+]poststarthook/generic-apiserver-start-informers ok Dec 13 06:34:10 crc kubenswrapper[5048]: [+]poststarthook/priority-and-fairness-config-consumer ok Dec 13 06:34:10 crc kubenswrapper[5048]: [+]poststarthook/priority-and-fairness-filter ok Dec 13 06:34:10 crc kubenswrapper[5048]: [+]poststarthook/storage-object-count-tracker-hook ok Dec 13 06:34:10 crc kubenswrapper[5048]: [+]poststarthook/start-apiextensions-informers ok Dec 13 06:34:10 crc kubenswrapper[5048]: [+]poststarthook/start-apiextensions-controllers ok Dec 13 06:34:10 crc kubenswrapper[5048]: [+]poststarthook/crd-informer-synced ok Dec 13 06:34:10 crc kubenswrapper[5048]: [+]poststarthook/start-system-namespaces-controller ok Dec 13 06:34:10 crc kubenswrapper[5048]: [+]poststarthook/start-cluster-authentication-info-controller ok Dec 13 06:34:10 crc kubenswrapper[5048]: [+]poststarthook/start-kube-apiserver-identity-lease-controller ok Dec 13 
06:34:10 crc kubenswrapper[5048]: [+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok Dec 13 06:34:10 crc kubenswrapper[5048]: [+]poststarthook/start-legacy-token-tracking-controller ok Dec 13 06:34:10 crc kubenswrapper[5048]: [+]poststarthook/start-service-ip-repair-controllers ok Dec 13 06:34:10 crc kubenswrapper[5048]: [-]poststarthook/rbac/bootstrap-roles failed: reason withheld Dec 13 06:34:10 crc kubenswrapper[5048]: [+]poststarthook/scheduling/bootstrap-system-priority-classes ok Dec 13 06:34:10 crc kubenswrapper[5048]: [+]poststarthook/priority-and-fairness-config-producer ok Dec 13 06:34:10 crc kubenswrapper[5048]: [+]poststarthook/bootstrap-controller ok Dec 13 06:34:10 crc kubenswrapper[5048]: [+]poststarthook/aggregator-reload-proxy-client-cert ok Dec 13 06:34:10 crc kubenswrapper[5048]: [+]poststarthook/start-kube-aggregator-informers ok Dec 13 06:34:10 crc kubenswrapper[5048]: [+]poststarthook/apiservice-status-local-available-controller ok Dec 13 06:34:10 crc kubenswrapper[5048]: [+]poststarthook/apiservice-status-remote-available-controller ok Dec 13 06:34:10 crc kubenswrapper[5048]: [+]poststarthook/apiservice-registration-controller ok Dec 13 06:34:10 crc kubenswrapper[5048]: [+]poststarthook/apiservice-wait-for-first-sync ok Dec 13 06:34:10 crc kubenswrapper[5048]: [+]poststarthook/apiservice-discovery-controller ok Dec 13 06:34:10 crc kubenswrapper[5048]: [+]poststarthook/kube-apiserver-autoregistration ok Dec 13 06:34:10 crc kubenswrapper[5048]: [+]autoregister-completion ok Dec 13 06:34:10 crc kubenswrapper[5048]: [+]poststarthook/apiservice-openapi-controller ok Dec 13 06:34:10 crc kubenswrapper[5048]: [+]poststarthook/apiservice-openapiv3-controller ok Dec 13 06:34:10 crc kubenswrapper[5048]: livez check failed Dec 13 06:34:10 crc kubenswrapper[5048]: I1213 06:34:10.593605 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 13 06:34:11 crc kubenswrapper[5048]: I1213 06:34:11.265379 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 13 06:34:14 crc kubenswrapper[5048]: W1213 06:34:14.357168 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod27b96e48_77ff_4f71_a919_9f30814704a7.slice/crio-4f290f59ab2e92c736faa6bf051d981a5ac6eb7442146807cedf96600695bf5a WatchSource:0}: Error finding container 4f290f59ab2e92c736faa6bf051d981a5ac6eb7442146807cedf96600695bf5a: Status 404 returned error can't find the container with id 4f290f59ab2e92c736faa6bf051d981a5ac6eb7442146807cedf96600695bf5a Dec 13 06:34:14 crc kubenswrapper[5048]: I1213 06:34:14.376265 5048 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 13 06:34:14 crc kubenswrapper[5048]: I1213 06:34:14.536822 5048 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="b99016d6-7c9f-422e-8b5e-2dde9994f5df" Dec 13 06:34:14 crc kubenswrapper[5048]: I1213 06:34:14.631132 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 13 06:34:14 crc 
Dec 13 06:34:15 crc kubenswrapper[5048]: I1213 06:34:15.162455 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5d89566846-prjbf" event={"ID":"27b96e48-77ff-4f71-a919-9f30814704a7","Type":"ContainerStarted","Data":"f6082ba2f836efd335828d364876eb245329d7d38146be8961ecd24c33a1acbb"}
Dec 13 06:34:15 crc kubenswrapper[5048]: I1213 06:34:15.162510 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5d89566846-prjbf" event={"ID":"27b96e48-77ff-4f71-a919-9f30814704a7","Type":"ContainerStarted","Data":"4f290f59ab2e92c736faa6bf051d981a5ac6eb7442146807cedf96600695bf5a"}
Dec 13 06:34:15 crc kubenswrapper[5048]: I1213 06:34:15.162713 5048 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="5e418ae3-9af9-445f-9b2d-c58699743512"
Dec 13 06:34:15 crc kubenswrapper[5048]: I1213 06:34:15.162741 5048 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="5e418ae3-9af9-445f-9b2d-c58699743512"
Dec 13 06:34:15 crc kubenswrapper[5048]: I1213 06:34:15.162810 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-5d89566846-prjbf"
Dec 13 06:34:15 crc kubenswrapper[5048]: I1213 06:34:15.166482 5048 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="b99016d6-7c9f-422e-8b5e-2dde9994f5df"
Dec 13 06:34:15 crc kubenswrapper[5048]: I1213 06:34:15.170561 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-5d89566846-prjbf"
Dec 13 06:34:15 crc kubenswrapper[5048]: I1213 06:34:15.425030 5048 patch_prober.go:28] interesting pod/route-controller-manager-74dc84cd6c-9769d container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.60:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Dec 13 06:34:15 crc kubenswrapper[5048]: I1213 06:34:15.425102 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.60:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Dec 13 06:34:19 crc kubenswrapper[5048]: I1213 06:34:19.567416 5048 scope.go:117] "RemoveContainer" containerID="46a3ec48c5fc8e47ed316f37e3e09ed748fd8df9bd7a24dbe03af86e70a4c351"
Dec 13 06:34:20 crc kubenswrapper[5048]: I1213 06:34:20.195308 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-zm7qx_60f5ae10-2f86-46f8-b613-f017b8753690/marketplace-operator/1.log"
Dec 13 06:34:20 crc kubenswrapper[5048]: I1213 06:34:20.195595 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" event={"ID":"60f5ae10-2f86-46f8-b613-f017b8753690","Type":"ContainerStarted","Data":"3ad09671069fe23f98d2cd657e5b03c8e9347cf52e8533e300243ef22004b429"}
Dec 13 06:34:21 crc kubenswrapper[5048]: I1213 06:34:21.201916 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-zm7qx_60f5ae10-2f86-46f8-b613-f017b8753690/marketplace-operator/2.log"
Dec 13 06:34:21 crc kubenswrapper[5048]: I1213 06:34:21.202417 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-zm7qx_60f5ae10-2f86-46f8-b613-f017b8753690/marketplace-operator/1.log"
Dec 13 06:34:21 crc kubenswrapper[5048]: I1213 06:34:21.202482 5048 generic.go:334] "Generic (PLEG): container finished" podID="60f5ae10-2f86-46f8-b613-f017b8753690" containerID="3ad09671069fe23f98d2cd657e5b03c8e9347cf52e8533e300243ef22004b429" exitCode=1
Dec 13 06:34:21 crc kubenswrapper[5048]: I1213 06:34:21.202515 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" event={"ID":"60f5ae10-2f86-46f8-b613-f017b8753690","Type":"ContainerDied","Data":"3ad09671069fe23f98d2cd657e5b03c8e9347cf52e8533e300243ef22004b429"}
Dec 13 06:34:21 crc kubenswrapper[5048]: I1213 06:34:21.202551 5048 scope.go:117] "RemoveContainer" containerID="46a3ec48c5fc8e47ed316f37e3e09ed748fd8df9bd7a24dbe03af86e70a4c351"
Dec 13 06:34:21 crc kubenswrapper[5048]: I1213 06:34:21.203051 5048 scope.go:117] "RemoveContainer" containerID="3ad09671069fe23f98d2cd657e5b03c8e9347cf52e8533e300243ef22004b429"
Dec 13 06:34:21 crc kubenswrapper[5048]: E1213 06:34:21.203262 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-zm7qx_openshift-marketplace(60f5ae10-2f86-46f8-b613-f017b8753690)\"" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" podUID="60f5ae10-2f86-46f8-b613-f017b8753690"
Dec 13 06:34:21 crc kubenswrapper[5048]: I1213 06:34:21.270850 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 13 06:34:22 crc kubenswrapper[5048]: I1213 06:34:22.210254 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-zm7qx_60f5ae10-2f86-46f8-b613-f017b8753690/marketplace-operator/2.log"
Dec 13 06:34:22 crc kubenswrapper[5048]: I1213 06:34:22.210836 5048 scope.go:117] "RemoveContainer" containerID="3ad09671069fe23f98d2cd657e5b03c8e9347cf52e8533e300243ef22004b429"
Dec 13 06:34:22 crc kubenswrapper[5048]: E1213 06:34:22.211030 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-zm7qx_openshift-marketplace(60f5ae10-2f86-46f8-b613-f017b8753690)\"" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" podUID="60f5ae10-2f86-46f8-b613-f017b8753690"
Dec 13 06:34:24 crc kubenswrapper[5048]: I1213 06:34:24.663824 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1"
Dec 13 06:34:24 crc kubenswrapper[5048]: I1213 06:34:24.758639 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx"
pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" Dec 13 06:34:24 crc kubenswrapper[5048]: I1213 06:34:24.758705 5048 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" Dec 13 06:34:24 crc kubenswrapper[5048]: I1213 06:34:24.759193 5048 scope.go:117] "RemoveContainer" containerID="3ad09671069fe23f98d2cd657e5b03c8e9347cf52e8533e300243ef22004b429" Dec 13 06:34:24 crc kubenswrapper[5048]: E1213 06:34:24.759391 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-zm7qx_openshift-marketplace(60f5ae10-2f86-46f8-b613-f017b8753690)\"" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" Dec 13 06:34:25 crc kubenswrapper[5048]: I1213 06:34:25.227305 5048 scope.go:117] "RemoveContainer" containerID="3ad09671069fe23f98d2cd657e5b03c8e9347cf52e8533e300243ef22004b429" Dec 13 06:34:25 crc kubenswrapper[5048]: E1213 06:34:25.227533 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-zm7qx_openshift-marketplace(60f5ae10-2f86-46f8-b613-f017b8753690)\"" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" Dec 13 06:34:25 crc kubenswrapper[5048]: I1213 06:34:25.333976 5048 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Dec 13 06:34:25 crc kubenswrapper[5048]: I1213 06:34:25.361258 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" Dec 13 06:34:25 crc kubenswrapper[5048]: I1213 06:34:25.364368 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Dec 13 06:34:25 crc kubenswrapper[5048]: I1213 06:34:25.415700 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Dec 13 06:34:25 crc kubenswrapper[5048]: I1213 06:34:25.473942 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Dec 13 06:34:25 crc kubenswrapper[5048]: I1213 06:34:25.598912 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Dec 13 06:34:25 crc kubenswrapper[5048]: I1213 06:34:25.699505 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Dec 13 06:34:26 crc kubenswrapper[5048]: I1213 06:34:26.035045 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Dec 13 06:34:26 crc kubenswrapper[5048]: I1213 06:34:26.100881 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Dec 13 06:34:26 crc kubenswrapper[5048]: I1213 06:34:26.147721 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Dec 13 06:34:26 crc kubenswrapper[5048]: I1213 06:34:26.221282 5048 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-network-console"/"networking-console-plugin" Dec 13 06:34:26 crc kubenswrapper[5048]: I1213 06:34:26.383514 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Dec 13 06:34:26 crc kubenswrapper[5048]: I1213 06:34:26.921933 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Dec 13 06:34:26 crc kubenswrapper[5048]: I1213 06:34:26.922015 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Dec 13 06:34:26 crc kubenswrapper[5048]: I1213 06:34:26.935710 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Dec 13 06:34:26 crc kubenswrapper[5048]: I1213 06:34:26.937113 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Dec 13 06:34:27 crc kubenswrapper[5048]: I1213 06:34:27.135660 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Dec 13 06:34:27 crc kubenswrapper[5048]: I1213 06:34:27.192629 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Dec 13 06:34:27 crc kubenswrapper[5048]: I1213 06:34:27.210743 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Dec 13 06:34:27 crc kubenswrapper[5048]: I1213 06:34:27.266471 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Dec 13 06:34:27 crc kubenswrapper[5048]: I1213 06:34:27.324129 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Dec 13 06:34:27 crc kubenswrapper[5048]: I1213 06:34:27.602743 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Dec 13 06:34:27 crc kubenswrapper[5048]: I1213 06:34:27.634526 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Dec 13 06:34:27 crc kubenswrapper[5048]: I1213 06:34:27.781386 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Dec 13 06:34:27 crc kubenswrapper[5048]: I1213 06:34:27.872895 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Dec 13 06:34:27 crc kubenswrapper[5048]: I1213 06:34:27.948937 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Dec 13 06:34:27 crc kubenswrapper[5048]: I1213 06:34:27.963531 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Dec 13 06:34:28 crc kubenswrapper[5048]: I1213 06:34:28.213354 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Dec 13 06:34:28 crc kubenswrapper[5048]: I1213 06:34:28.224504 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Dec 13 06:34:28 crc kubenswrapper[5048]: 
I1213 06:34:28.256060 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Dec 13 06:34:28 crc kubenswrapper[5048]: I1213 06:34:28.313717 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Dec 13 06:34:28 crc kubenswrapper[5048]: I1213 06:34:28.372036 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Dec 13 06:34:28 crc kubenswrapper[5048]: I1213 06:34:28.410307 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Dec 13 06:34:28 crc kubenswrapper[5048]: I1213 06:34:28.505354 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Dec 13 06:34:28 crc kubenswrapper[5048]: I1213 06:34:28.526043 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Dec 13 06:34:28 crc kubenswrapper[5048]: I1213 06:34:28.604474 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Dec 13 06:34:28 crc kubenswrapper[5048]: I1213 06:34:28.644304 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Dec 13 06:34:28 crc kubenswrapper[5048]: I1213 06:34:28.650372 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Dec 13 06:34:28 crc kubenswrapper[5048]: I1213 06:34:28.672740 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Dec 13 06:34:28 crc kubenswrapper[5048]: I1213 06:34:28.750160 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Dec 13 06:34:28 crc kubenswrapper[5048]: I1213 06:34:28.824637 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Dec 13 06:34:28 crc kubenswrapper[5048]: I1213 06:34:28.906536 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Dec 13 06:34:29 crc kubenswrapper[5048]: I1213 06:34:29.038926 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Dec 13 06:34:29 crc kubenswrapper[5048]: I1213 06:34:29.040786 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Dec 13 06:34:29 crc kubenswrapper[5048]: I1213 06:34:29.100973 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Dec 13 06:34:29 crc kubenswrapper[5048]: I1213 06:34:29.151091 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Dec 13 06:34:29 crc kubenswrapper[5048]: I1213 06:34:29.171382 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Dec 13 06:34:29 crc kubenswrapper[5048]: I1213 06:34:29.221709 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Dec 13 06:34:29 crc kubenswrapper[5048]: I1213 06:34:29.248932 5048 reflector.go:368] 
Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Dec 13 06:34:29 crc kubenswrapper[5048]: I1213 06:34:29.429033 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Dec 13 06:34:29 crc kubenswrapper[5048]: I1213 06:34:29.461476 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Dec 13 06:34:29 crc kubenswrapper[5048]: I1213 06:34:29.518366 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Dec 13 06:34:29 crc kubenswrapper[5048]: I1213 06:34:29.519560 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Dec 13 06:34:29 crc kubenswrapper[5048]: I1213 06:34:29.537110 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Dec 13 06:34:29 crc kubenswrapper[5048]: I1213 06:34:29.602335 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Dec 13 06:34:29 crc kubenswrapper[5048]: I1213 06:34:29.626913 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Dec 13 06:34:29 crc kubenswrapper[5048]: I1213 06:34:29.762326 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Dec 13 06:34:29 crc kubenswrapper[5048]: I1213 06:34:29.812239 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Dec 13 06:34:29 crc kubenswrapper[5048]: I1213 06:34:29.812497 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Dec 13 06:34:29 crc kubenswrapper[5048]: I1213 06:34:29.816909 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Dec 13 06:34:29 crc kubenswrapper[5048]: I1213 06:34:29.834782 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Dec 13 06:34:29 crc kubenswrapper[5048]: I1213 06:34:29.882932 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Dec 13 06:34:29 crc kubenswrapper[5048]: I1213 06:34:29.890866 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Dec 13 06:34:29 crc kubenswrapper[5048]: I1213 06:34:29.948321 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Dec 13 06:34:30 crc kubenswrapper[5048]: I1213 06:34:30.012081 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Dec 13 06:34:30 crc kubenswrapper[5048]: I1213 06:34:30.034312 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Dec 13 06:34:30 crc kubenswrapper[5048]: I1213 06:34:30.043932 5048 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Dec 13 06:34:30 crc kubenswrapper[5048]: I1213 06:34:30.051281 5048 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-5d89566846-prjbf" podStartSLOduration=49.0512482 podStartE2EDuration="49.0512482s" podCreationTimestamp="2025-12-13 06:33:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:34:15.181132566 +0000 UTC m=+289.047727167" watchObservedRunningTime="2025-12-13 06:34:30.0512482 +0000 UTC m=+303.917842821" Dec 13 06:34:30 crc kubenswrapper[5048]: I1213 06:34:30.051793 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=40.051780695 podStartE2EDuration="40.051780695s" podCreationTimestamp="2025-12-13 06:33:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:34:14.512394749 +0000 UTC m=+288.378989350" watchObservedRunningTime="2025-12-13 06:34:30.051780695 +0000 UTC m=+303.918375316" Dec 13 06:34:30 crc kubenswrapper[5048]: I1213 06:34:30.053687 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" podStartSLOduration=49.05366443 podStartE2EDuration="49.05366443s" podCreationTimestamp="2025-12-13 06:33:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:34:14.381289406 +0000 UTC m=+288.247884007" watchObservedRunningTime="2025-12-13 06:34:30.05366443 +0000 UTC m=+303.920259051" Dec 13 06:34:30 crc kubenswrapper[5048]: I1213 06:34:30.054526 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-rb7fl","openshift-kube-apiserver/kube-apiserver-crc","openshift-marketplace/certified-operators-lqvmp","openshift-marketplace/redhat-operators-l9l5s","openshift-marketplace/certified-operators-hjls5","openshift-authentication/oauth-openshift-558db77b4-dwk4m","openshift-marketplace/community-operators-q8ltl","openshift-marketplace/community-operators-vfwpt"] Dec 13 06:34:30 crc kubenswrapper[5048]: I1213 06:34:30.054667 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Dec 13 06:34:30 crc kubenswrapper[5048]: I1213 06:34:30.054723 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5d89566846-prjbf"] Dec 13 06:34:30 crc kubenswrapper[5048]: I1213 06:34:30.055255 5048 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="5e418ae3-9af9-445f-9b2d-c58699743512" Dec 13 06:34:30 crc kubenswrapper[5048]: I1213 06:34:30.055304 5048 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="5e418ae3-9af9-445f-9b2d-c58699743512" Dec 13 06:34:30 crc kubenswrapper[5048]: I1213 06:34:30.069335 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Dec 13 06:34:30 crc kubenswrapper[5048]: I1213 06:34:30.082390 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=16.082371666 podStartE2EDuration="16.082371666s" podCreationTimestamp="2025-12-13 06:34:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:34:30.081261684 +0000 UTC m=+303.947856355" watchObservedRunningTime="2025-12-13 06:34:30.082371666 +0000 UTC m=+303.948966257" Dec 13 06:34:30 crc kubenswrapper[5048]: I1213 06:34:30.263666 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 13 06:34:30 crc kubenswrapper[5048]: I1213 06:34:30.277814 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Dec 13 06:34:30 crc kubenswrapper[5048]: I1213 06:34:30.384314 5048 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Dec 13 06:34:30 crc kubenswrapper[5048]: I1213 06:34:30.392488 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Dec 13 06:34:30 crc kubenswrapper[5048]: I1213 06:34:30.440394 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Dec 13 06:34:30 crc kubenswrapper[5048]: I1213 06:34:30.478954 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Dec 13 06:34:30 crc kubenswrapper[5048]: I1213 06:34:30.520469 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Dec 13 06:34:30 crc kubenswrapper[5048]: I1213 06:34:30.574797 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="07531c82-87d1-409f-9c5a-4910633b5786" path="/var/lib/kubelet/pods/07531c82-87d1-409f-9c5a-4910633b5786/volumes" Dec 13 06:34:30 crc kubenswrapper[5048]: I1213 06:34:30.576071 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" path="/var/lib/kubelet/pods/1ea76a75-e8de-4a91-89af-726df36e8a21/volumes" Dec 13 06:34:30 crc kubenswrapper[5048]: I1213 06:34:30.577496 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" path="/var/lib/kubelet/pods/57717114-1abd-46bf-bdbd-0a785d734cd3/volumes" Dec 13 06:34:30 crc kubenswrapper[5048]: I1213 06:34:30.578882 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" path="/var/lib/kubelet/pods/6885bcbf-dd86-4e5a-bd21-92395c5ae676/volumes" Dec 13 06:34:30 crc kubenswrapper[5048]: I1213 06:34:30.579650 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa6cce4c-6bc2-469b-9062-e928744616db" path="/var/lib/kubelet/pods/aa6cce4c-6bc2-469b-9062-e928744616db/volumes" Dec 13 06:34:30 crc kubenswrapper[5048]: I1213 06:34:30.581012 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" path="/var/lib/kubelet/pods/d5fb554d-84e3-4bf0-857f-a64da6e6a36f/volumes" Dec 13 06:34:30 crc kubenswrapper[5048]: I1213 06:34:30.581663 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9" path="/var/lib/kubelet/pods/f7b93b87-31d6-4279-8ac9-b834417f66d9/volumes" Dec 13 06:34:30 crc kubenswrapper[5048]: I1213 06:34:30.587283 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 13 06:34:30 crc kubenswrapper[5048]: I1213 06:34:30.634323 5048 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Dec 13 06:34:30 crc kubenswrapper[5048]: I1213 06:34:30.698184 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Dec 13 06:34:30 crc kubenswrapper[5048]: I1213 06:34:30.749010 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Dec 13 06:34:30 crc kubenswrapper[5048]: I1213 06:34:30.824455 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Dec 13 06:34:30 crc kubenswrapper[5048]: I1213 06:34:30.825832 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Dec 13 06:34:30 crc kubenswrapper[5048]: I1213 06:34:30.909679 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Dec 13 06:34:30 crc kubenswrapper[5048]: I1213 06:34:30.933293 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Dec 13 06:34:30 crc kubenswrapper[5048]: I1213 06:34:30.955198 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Dec 13 06:34:31 crc kubenswrapper[5048]: I1213 06:34:31.010741 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Dec 13 06:34:31 crc kubenswrapper[5048]: I1213 06:34:31.087045 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Dec 13 06:34:31 crc kubenswrapper[5048]: I1213 06:34:31.093636 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Dec 13 06:34:31 crc kubenswrapper[5048]: I1213 06:34:31.097789 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Dec 13 06:34:31 crc kubenswrapper[5048]: I1213 06:34:31.130989 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Dec 13 06:34:31 crc kubenswrapper[5048]: I1213 06:34:31.136035 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Dec 13 06:34:31 crc kubenswrapper[5048]: I1213 06:34:31.139576 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Dec 13 06:34:31 crc kubenswrapper[5048]: I1213 06:34:31.255889 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Dec 13 06:34:31 crc kubenswrapper[5048]: I1213 06:34:31.268382 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 13 06:34:31 crc kubenswrapper[5048]: I1213 06:34:31.346902 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Dec 13 06:34:31 crc kubenswrapper[5048]: I1213 06:34:31.434906 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Dec 13 06:34:31 crc kubenswrapper[5048]: I1213 06:34:31.508994 5048 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-etcd-operator"/"etcd-client" Dec 13 06:34:31 crc kubenswrapper[5048]: I1213 06:34:31.557927 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Dec 13 06:34:31 crc kubenswrapper[5048]: I1213 06:34:31.621952 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Dec 13 06:34:31 crc kubenswrapper[5048]: I1213 06:34:31.652712 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Dec 13 06:34:31 crc kubenswrapper[5048]: I1213 06:34:31.750558 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Dec 13 06:34:31 crc kubenswrapper[5048]: I1213 06:34:31.821148 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Dec 13 06:34:31 crc kubenswrapper[5048]: I1213 06:34:31.878964 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Dec 13 06:34:31 crc kubenswrapper[5048]: I1213 06:34:31.921006 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Dec 13 06:34:32 crc kubenswrapper[5048]: I1213 06:34:32.060002 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Dec 13 06:34:32 crc kubenswrapper[5048]: I1213 06:34:32.080017 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Dec 13 06:34:32 crc kubenswrapper[5048]: I1213 06:34:32.102838 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Dec 13 06:34:32 crc kubenswrapper[5048]: I1213 06:34:32.123230 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Dec 13 06:34:32 crc kubenswrapper[5048]: I1213 06:34:32.402006 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Dec 13 06:34:32 crc kubenswrapper[5048]: I1213 06:34:32.411060 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Dec 13 06:34:32 crc kubenswrapper[5048]: I1213 06:34:32.447737 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Dec 13 06:34:32 crc kubenswrapper[5048]: I1213 06:34:32.473376 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Dec 13 06:34:32 crc kubenswrapper[5048]: I1213 06:34:32.499398 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Dec 13 06:34:32 crc kubenswrapper[5048]: I1213 06:34:32.512410 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Dec 13 06:34:32 crc kubenswrapper[5048]: I1213 06:34:32.595103 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Dec 13 06:34:32 crc kubenswrapper[5048]: I1213 06:34:32.638922 5048 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-cluster-machine-approver"/"machine-approver-config" Dec 13 06:34:32 crc kubenswrapper[5048]: I1213 06:34:32.665609 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Dec 13 06:34:32 crc kubenswrapper[5048]: I1213 06:34:32.838559 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Dec 13 06:34:32 crc kubenswrapper[5048]: I1213 06:34:32.854997 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Dec 13 06:34:32 crc kubenswrapper[5048]: I1213 06:34:32.893322 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Dec 13 06:34:33 crc kubenswrapper[5048]: I1213 06:34:33.018370 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Dec 13 06:34:33 crc kubenswrapper[5048]: I1213 06:34:33.028598 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Dec 13 06:34:33 crc kubenswrapper[5048]: I1213 06:34:33.182564 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Dec 13 06:34:33 crc kubenswrapper[5048]: I1213 06:34:33.189771 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Dec 13 06:34:33 crc kubenswrapper[5048]: I1213 06:34:33.214011 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Dec 13 06:34:33 crc kubenswrapper[5048]: I1213 06:34:33.228599 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Dec 13 06:34:33 crc kubenswrapper[5048]: I1213 06:34:33.248647 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Dec 13 06:34:33 crc kubenswrapper[5048]: I1213 06:34:33.464961 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Dec 13 06:34:33 crc kubenswrapper[5048]: I1213 06:34:33.467026 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Dec 13 06:34:33 crc kubenswrapper[5048]: I1213 06:34:33.483743 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Dec 13 06:34:33 crc kubenswrapper[5048]: I1213 06:34:33.519382 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Dec 13 06:34:33 crc kubenswrapper[5048]: I1213 06:34:33.522855 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Dec 13 06:34:33 crc kubenswrapper[5048]: I1213 06:34:33.579501 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Dec 13 06:34:33 crc kubenswrapper[5048]: I1213 06:34:33.589268 5048 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Dec 13 06:34:33 crc kubenswrapper[5048]: I1213 06:34:33.617309 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Dec 13 06:34:33 crc kubenswrapper[5048]: I1213 06:34:33.686894 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Dec 13 06:34:33 crc kubenswrapper[5048]: I1213 06:34:33.742677 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Dec 13 06:34:33 crc kubenswrapper[5048]: I1213 06:34:33.750564 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Dec 13 06:34:33 crc kubenswrapper[5048]: I1213 06:34:33.751853 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Dec 13 06:34:33 crc kubenswrapper[5048]: I1213 06:34:33.794256 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Dec 13 06:34:33 crc kubenswrapper[5048]: I1213 06:34:33.834604 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Dec 13 06:34:33 crc kubenswrapper[5048]: I1213 06:34:33.834764 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Dec 13 06:34:33 crc kubenswrapper[5048]: I1213 06:34:33.893865 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Dec 13 06:34:33 crc kubenswrapper[5048]: I1213 06:34:33.900775 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Dec 13 06:34:33 crc kubenswrapper[5048]: I1213 06:34:33.978516 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Dec 13 06:34:33 crc kubenswrapper[5048]: I1213 06:34:33.985360 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Dec 13 06:34:33 crc kubenswrapper[5048]: I1213 06:34:33.992919 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Dec 13 06:34:34 crc kubenswrapper[5048]: I1213 06:34:34.043296 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Dec 13 06:34:34 crc kubenswrapper[5048]: I1213 06:34:34.221410 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Dec 13 06:34:34 crc kubenswrapper[5048]: I1213 06:34:34.234411 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Dec 13 06:34:34 crc kubenswrapper[5048]: I1213 06:34:34.358846 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Dec 13 06:34:34 crc kubenswrapper[5048]: I1213 06:34:34.413021 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Dec 13 06:34:34 crc kubenswrapper[5048]: I1213 06:34:34.481674 5048 reflector.go:368] Caches 
populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Dec 13 06:34:34 crc kubenswrapper[5048]: I1213 06:34:34.487819 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Dec 13 06:34:34 crc kubenswrapper[5048]: I1213 06:34:34.521850 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Dec 13 06:34:34 crc kubenswrapper[5048]: I1213 06:34:34.599107 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Dec 13 06:34:34 crc kubenswrapper[5048]: I1213 06:34:34.607634 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Dec 13 06:34:34 crc kubenswrapper[5048]: I1213 06:34:34.631543 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Dec 13 06:34:34 crc kubenswrapper[5048]: I1213 06:34:34.691175 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Dec 13 06:34:34 crc kubenswrapper[5048]: I1213 06:34:34.710646 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Dec 13 06:34:34 crc kubenswrapper[5048]: I1213 06:34:34.711057 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Dec 13 06:34:34 crc kubenswrapper[5048]: I1213 06:34:34.838726 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Dec 13 06:34:34 crc kubenswrapper[5048]: I1213 06:34:34.844344 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Dec 13 06:34:34 crc kubenswrapper[5048]: I1213 06:34:34.854276 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Dec 13 06:34:34 crc kubenswrapper[5048]: I1213 06:34:34.935847 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Dec 13 06:34:35 crc kubenswrapper[5048]: I1213 06:34:35.235300 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Dec 13 06:34:35 crc kubenswrapper[5048]: I1213 06:34:35.269755 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Dec 13 06:34:35 crc kubenswrapper[5048]: I1213 06:34:35.412261 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Dec 13 06:34:35 crc kubenswrapper[5048]: I1213 06:34:35.450197 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Dec 13 06:34:35 crc kubenswrapper[5048]: I1213 06:34:35.477319 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Dec 13 06:34:35 crc kubenswrapper[5048]: I1213 06:34:35.513111 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Dec 13 06:34:35 crc kubenswrapper[5048]: I1213 06:34:35.534028 5048 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Dec 13 06:34:35 crc kubenswrapper[5048]: I1213 06:34:35.546306 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Dec 13 06:34:35 crc kubenswrapper[5048]: I1213 06:34:35.605899 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Dec 13 06:34:35 crc kubenswrapper[5048]: I1213 06:34:35.612644 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Dec 13 06:34:35 crc kubenswrapper[5048]: I1213 06:34:35.704617 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Dec 13 06:34:35 crc kubenswrapper[5048]: I1213 06:34:35.749229 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Dec 13 06:34:35 crc kubenswrapper[5048]: I1213 06:34:35.796875 5048 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Dec 13 06:34:35 crc kubenswrapper[5048]: I1213 06:34:35.797146 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://98b93aef31307e077f3cd0616fadc36c2475728e96423a52f5b253b522a06c94" gracePeriod=5 Dec 13 06:34:35 crc kubenswrapper[5048]: I1213 06:34:35.893419 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Dec 13 06:34:35 crc kubenswrapper[5048]: I1213 06:34:35.903890 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Dec 13 06:34:35 crc kubenswrapper[5048]: I1213 06:34:35.992778 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Dec 13 06:34:36 crc kubenswrapper[5048]: I1213 06:34:36.080381 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Dec 13 06:34:36 crc kubenswrapper[5048]: I1213 06:34:36.088424 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Dec 13 06:34:36 crc kubenswrapper[5048]: I1213 06:34:36.218257 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Dec 13 06:34:36 crc kubenswrapper[5048]: I1213 06:34:36.273974 5048 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Dec 13 06:34:36 crc kubenswrapper[5048]: I1213 06:34:36.292302 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Dec 13 06:34:36 crc kubenswrapper[5048]: I1213 06:34:36.419015 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Dec 13 06:34:36 crc kubenswrapper[5048]: I1213 06:34:36.449873 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Dec 13 06:34:36 crc kubenswrapper[5048]: I1213 06:34:36.476139 5048 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-apiserver"/"serving-cert" Dec 13 06:34:36 crc kubenswrapper[5048]: I1213 06:34:36.569279 5048 scope.go:117] "RemoveContainer" containerID="3ad09671069fe23f98d2cd657e5b03c8e9347cf52e8533e300243ef22004b429" Dec 13 06:34:36 crc kubenswrapper[5048]: E1213 06:34:36.569601 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-zm7qx_openshift-marketplace(60f5ae10-2f86-46f8-b613-f017b8753690)\"" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" podUID="60f5ae10-2f86-46f8-b613-f017b8753690" Dec 13 06:34:36 crc kubenswrapper[5048]: I1213 06:34:36.703303 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Dec 13 06:34:36 crc kubenswrapper[5048]: I1213 06:34:36.775705 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Dec 13 06:34:36 crc kubenswrapper[5048]: I1213 06:34:36.841509 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Dec 13 06:34:37 crc kubenswrapper[5048]: I1213 06:34:37.048692 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Dec 13 06:34:37 crc kubenswrapper[5048]: I1213 06:34:37.206983 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Dec 13 06:34:37 crc kubenswrapper[5048]: I1213 06:34:37.244766 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Dec 13 06:34:37 crc kubenswrapper[5048]: I1213 06:34:37.301508 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Dec 13 06:34:37 crc kubenswrapper[5048]: I1213 06:34:37.366730 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Dec 13 06:34:37 crc kubenswrapper[5048]: I1213 06:34:37.401954 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Dec 13 06:34:37 crc kubenswrapper[5048]: I1213 06:34:37.402169 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Dec 13 06:34:37 crc kubenswrapper[5048]: I1213 06:34:37.488830 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Dec 13 06:34:37 crc kubenswrapper[5048]: I1213 06:34:37.564379 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Dec 13 06:34:37 crc kubenswrapper[5048]: I1213 06:34:37.721749 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Dec 13 06:34:37 crc kubenswrapper[5048]: I1213 06:34:37.780507 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Dec 13 06:34:37 crc kubenswrapper[5048]: I1213 06:34:37.789642 5048 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Dec 13 06:34:37 crc kubenswrapper[5048]: I1213 
06:34:37.816145 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Dec 13 06:34:37 crc kubenswrapper[5048]: I1213 06:34:37.935694 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Dec 13 06:34:37 crc kubenswrapper[5048]: I1213 06:34:37.964919 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Dec 13 06:34:38 crc kubenswrapper[5048]: I1213 06:34:38.168795 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Dec 13 06:34:38 crc kubenswrapper[5048]: I1213 06:34:38.210837 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-7z7xz"] Dec 13 06:34:38 crc kubenswrapper[5048]: I1213 06:34:38.213351 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Dec 13 06:34:38 crc kubenswrapper[5048]: I1213 06:34:38.276655 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Dec 13 06:34:38 crc kubenswrapper[5048]: I1213 06:34:38.391105 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Dec 13 06:34:38 crc kubenswrapper[5048]: I1213 06:34:38.407111 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Dec 13 06:34:38 crc kubenswrapper[5048]: I1213 06:34:38.671573 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Dec 13 06:34:38 crc kubenswrapper[5048]: I1213 06:34:38.705549 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Dec 13 06:34:38 crc kubenswrapper[5048]: I1213 06:34:38.759187 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Dec 13 06:34:38 crc kubenswrapper[5048]: I1213 06:34:38.778379 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Dec 13 06:34:38 crc kubenswrapper[5048]: I1213 06:34:38.780055 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Dec 13 06:34:38 crc kubenswrapper[5048]: I1213 06:34:38.851842 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Dec 13 06:34:39 crc kubenswrapper[5048]: I1213 06:34:39.008011 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Dec 13 06:34:39 crc kubenswrapper[5048]: I1213 06:34:39.249175 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Dec 13 06:34:39 crc kubenswrapper[5048]: I1213 06:34:39.264956 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Dec 13 06:34:39 crc kubenswrapper[5048]: I1213 06:34:39.365800 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" 
Dec 13 06:34:39 crc kubenswrapper[5048]: I1213 06:34:39.367605 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt"
Dec 13 06:34:39 crc kubenswrapper[5048]: I1213 06:34:39.398946 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Dec 13 06:34:39 crc kubenswrapper[5048]: I1213 06:34:39.537848 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert"
Dec 13 06:34:39 crc kubenswrapper[5048]: I1213 06:34:39.585895 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle"
Dec 13 06:34:39 crc kubenswrapper[5048]: I1213 06:34:39.930178 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca"
Dec 13 06:34:40 crc kubenswrapper[5048]: I1213 06:34:40.346406 5048 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k"
Dec 13 06:34:40 crc kubenswrapper[5048]: I1213 06:34:40.507453 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt"
Dec 13 06:34:40 crc kubenswrapper[5048]: I1213 06:34:40.563994 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls"
Dec 13 06:34:41 crc kubenswrapper[5048]: I1213 06:34:41.023846 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-5d89566846-prjbf"]
Dec 13 06:34:41 crc kubenswrapper[5048]: I1213 06:34:41.024102 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-5d89566846-prjbf" podUID="27b96e48-77ff-4f71-a919-9f30814704a7" containerName="controller-manager" containerID="cri-o://f6082ba2f836efd335828d364876eb245329d7d38146be8961ecd24c33a1acbb" gracePeriod=30
Dec 13 06:34:41 crc kubenswrapper[5048]: I1213 06:34:41.127235 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d"]
Dec 13 06:34:41 crc kubenswrapper[5048]: I1213 06:34:41.127503 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" containerName="route-controller-manager" containerID="cri-o://c2c1ed6442aea77d542f2e2c81ac18e1c0bfef3d208cb59d4043fa0b12360374" gracePeriod=30
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.927068 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-7f58797b5d-hrdj7"]
Dec 13 06:34:42 crc kubenswrapper[5048]: E1213 06:34:42.927758 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" containerName="extract-utilities"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.927780 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" containerName="extract-utilities"
Dec 13 06:34:42 crc kubenswrapper[5048]: E1213 06:34:42.927801 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07531c82-87d1-409f-9c5a-4910633b5786" containerName="registry-server"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.927813 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="07531c82-87d1-409f-9c5a-4910633b5786" containerName="registry-server"
Dec 13 06:34:42 crc kubenswrapper[5048]: E1213 06:34:42.927830 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" containerName="extract-content"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.927846 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" containerName="extract-content"
Dec 13 06:34:42 crc kubenswrapper[5048]: E1213 06:34:42.927865 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" containerName="registry-server"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.927882 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" containerName="registry-server"
Dec 13 06:34:42 crc kubenswrapper[5048]: E1213 06:34:42.927903 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="268d907d-1730-456e-8e52-67e58aca607b" containerName="installer"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.927919 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="268d907d-1730-456e-8e52-67e58aca607b" containerName="installer"
Dec 13 06:34:42 crc kubenswrapper[5048]: E1213 06:34:42.927945 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa6cce4c-6bc2-469b-9062-e928744616db" containerName="oauth-openshift"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.927962 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa6cce4c-6bc2-469b-9062-e928744616db" containerName="oauth-openshift"
Dec 13 06:34:42 crc kubenswrapper[5048]: E1213 06:34:42.927981 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" containerName="extract-utilities"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.927995 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" containerName="extract-utilities"
Dec 13 06:34:42 crc kubenswrapper[5048]: E1213 06:34:42.928017 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9" containerName="registry-server"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.928038 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9" containerName="registry-server"
Dec 13 06:34:42 crc kubenswrapper[5048]: E1213 06:34:42.928061 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" containerName="extract-content"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.928076 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" containerName="extract-content"
Dec 13 06:34:42 crc kubenswrapper[5048]: E1213 06:34:42.928100 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07531c82-87d1-409f-9c5a-4910633b5786" containerName="extract-utilities"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.928112 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="07531c82-87d1-409f-9c5a-4910633b5786" containerName="extract-utilities"
Dec 13 06:34:42 crc kubenswrapper[5048]: E1213 06:34:42.928133 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.928145 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor"
Dec 13 06:34:42 crc kubenswrapper[5048]: E1213 06:34:42.928159 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" containerName="registry-server"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.928171 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" containerName="registry-server"
Dec 13 06:34:42 crc kubenswrapper[5048]: E1213 06:34:42.928188 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" containerName="extract-content"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.928200 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" containerName="extract-content"
Dec 13 06:34:42 crc kubenswrapper[5048]: E1213 06:34:42.928217 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9" containerName="extract-content"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.928230 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9" containerName="extract-content"
Dec 13 06:34:42 crc kubenswrapper[5048]: E1213 06:34:42.928243 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9" containerName="extract-utilities"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.928256 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9" containerName="extract-utilities"
Dec 13 06:34:42 crc kubenswrapper[5048]: E1213 06:34:42.928274 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" containerName="extract-utilities"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.928287 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" containerName="extract-utilities"
Dec 13 06:34:42 crc kubenswrapper[5048]: E1213 06:34:42.928303 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" containerName="extract-content"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.928315 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" containerName="extract-content"
Dec 13 06:34:42 crc kubenswrapper[5048]: E1213 06:34:42.928332 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07531c82-87d1-409f-9c5a-4910633b5786" containerName="extract-content"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.928344 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="07531c82-87d1-409f-9c5a-4910633b5786" containerName="extract-content"
Dec 13 06:34:42 crc kubenswrapper[5048]: E1213 06:34:42.928360 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" containerName="registry-server"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.928372 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" containerName="registry-server"
Dec 13 06:34:42 crc kubenswrapper[5048]: E1213 06:34:42.928389 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" containerName="extract-utilities"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.928402 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" containerName="extract-utilities"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.928592 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="57717114-1abd-46bf-bdbd-0a785d734cd3" containerName="registry-server"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.928611 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="268d907d-1730-456e-8e52-67e58aca607b" containerName="installer"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.928631 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="07531c82-87d1-409f-9c5a-4910633b5786" containerName="registry-server"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.928649 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="6885bcbf-dd86-4e5a-bd21-92395c5ae676" containerName="registry-server"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.928664 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ea76a75-e8de-4a91-89af-726df36e8a21" containerName="registry-server"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.928730 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.928749 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa6cce4c-6bc2-469b-9062-e928744616db" containerName="oauth-openshift"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.928769 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5fb554d-84e3-4bf0-857f-a64da6e6a36f" containerName="extract-content"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.928788 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7b93b87-31d6-4279-8ac9-b834417f66d9" containerName="registry-server"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.929812 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.945459 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.946007 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.947730 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.948005 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.948813 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.948936 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.947780 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-7f58797b5d-hrdj7"]
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.949197 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.949309 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.948824 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.949677 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.949918 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.950195 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.952655 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.956511 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template"
Dec 13 06:34:42 crc kubenswrapper[5048]: I1213 06:34:42.958208 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle"
Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.028324 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-audit-policies\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7"
Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.028643 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7"
Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.028837 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ppz9z\" (UniqueName: \"kubernetes.io/projected/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-kube-api-access-ppz9z\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7"
Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.028926 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-v4-0-config-user-template-login\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7"
Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.029011 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-audit-dir\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7"
Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.029089 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7"
Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.029174 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7"
Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.029253 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-v4-0-config-system-session\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7"
Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.029356 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-v4-0-config-system-service-ca\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7"
Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.029571 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7"
Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.029657 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-v4-0-config-user-template-error\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7"
Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.029737 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7"
Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.029809 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7"
Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.029882 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-v4-0-config-system-router-certs\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7"
Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.131639 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-audit-policies\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7"
Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.133096 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7"
Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.134160 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ppz9z\" (UniqueName:
\"kubernetes.io/projected/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-kube-api-access-ppz9z\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.134687 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-v4-0-config-user-template-login\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.135238 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-audit-dir\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.135344 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-audit-dir\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.135375 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.135499 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.135548 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-v4-0-config-system-session\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.135583 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-v4-0-config-system-service-ca\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.135612 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-v4-0-config-user-idp-0-file-data\") pod 
\"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.135669 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-v4-0-config-user-template-error\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.133047 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-audit-policies\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.135717 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.135791 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.135835 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-v4-0-config-system-router-certs\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.136373 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.137855 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-v4-0-config-system-service-ca\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.138099 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-v4-0-config-system-trusted-ca-bundle\") pod 
\"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.140263 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.146317 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-v4-0-config-user-template-error\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.146505 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.146576 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-v4-0-config-system-router-certs\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.146831 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-v4-0-config-system-session\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.146923 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-v4-0-config-user-template-login\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.147005 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.154107 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7f58797b5d-hrdj7\" 
(UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.159600 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ppz9z\" (UniqueName: \"kubernetes.io/projected/41b23dfe-9cd0-43e2-98ad-e52eb61bc362-kube-api-access-ppz9z\") pod \"oauth-openshift-7f58797b5d-hrdj7\" (UID: \"41b23dfe-9cd0-43e2-98ad-e52eb61bc362\") " pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.339270 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.339701 5048 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="98b93aef31307e077f3cd0616fadc36c2475728e96423a52f5b253b522a06c94" exitCode=137 Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.373363 5048 generic.go:334] "Generic (PLEG): container finished" podID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" containerID="c2c1ed6442aea77d542f2e2c81ac18e1c0bfef3d208cb59d4043fa0b12360374" exitCode=0 Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.373491 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" event={"ID":"8c1ef8c9-54d7-4182-bd65-cdbf97bb6284","Type":"ContainerDied","Data":"c2c1ed6442aea77d542f2e2c81ac18e1c0bfef3d208cb59d4043fa0b12360374"} Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.375459 5048 generic.go:334] "Generic (PLEG): container finished" podID="27b96e48-77ff-4f71-a919-9f30814704a7" containerID="f6082ba2f836efd335828d364876eb245329d7d38146be8961ecd24c33a1acbb" exitCode=0 Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.375510 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5d89566846-prjbf" event={"ID":"27b96e48-77ff-4f71-a919-9f30814704a7","Type":"ContainerDied","Data":"f6082ba2f836efd335828d364876eb245329d7d38146be8961ecd24c33a1acbb"} Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.657486 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.659720 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5d89566846-prjbf" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.700142 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-76f74d665d-wjh6p"] Dec 13 06:34:43 crc kubenswrapper[5048]: E1213 06:34:43.700484 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27b96e48-77ff-4f71-a919-9f30814704a7" containerName="controller-manager" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.700513 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="27b96e48-77ff-4f71-a919-9f30814704a7" containerName="controller-manager" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.700673 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="27b96e48-77ff-4f71-a919-9f30814704a7" containerName="controller-manager" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.701185 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-76f74d665d-wjh6p" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.709612 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-76f74d665d-wjh6p"] Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.747508 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/27b96e48-77ff-4f71-a919-9f30814704a7-serving-cert\") pod \"27b96e48-77ff-4f71-a919-9f30814704a7\" (UID: \"27b96e48-77ff-4f71-a919-9f30814704a7\") " Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.749553 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/27b96e48-77ff-4f71-a919-9f30814704a7-proxy-ca-bundles\") pod \"27b96e48-77ff-4f71-a919-9f30814704a7\" (UID: \"27b96e48-77ff-4f71-a919-9f30814704a7\") " Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.749659 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/27b96e48-77ff-4f71-a919-9f30814704a7-client-ca\") pod \"27b96e48-77ff-4f71-a919-9f30814704a7\" (UID: \"27b96e48-77ff-4f71-a919-9f30814704a7\") " Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.749695 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ggczv\" (UniqueName: \"kubernetes.io/projected/27b96e48-77ff-4f71-a919-9f30814704a7-kube-api-access-ggczv\") pod \"27b96e48-77ff-4f71-a919-9f30814704a7\" (UID: \"27b96e48-77ff-4f71-a919-9f30814704a7\") " Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.749759 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27b96e48-77ff-4f71-a919-9f30814704a7-config\") pod \"27b96e48-77ff-4f71-a919-9f30814704a7\" (UID: \"27b96e48-77ff-4f71-a919-9f30814704a7\") " Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.750728 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/27b96e48-77ff-4f71-a919-9f30814704a7-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "27b96e48-77ff-4f71-a919-9f30814704a7" (UID: "27b96e48-77ff-4f71-a919-9f30814704a7"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.750773 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/27b96e48-77ff-4f71-a919-9f30814704a7-client-ca" (OuterVolumeSpecName: "client-ca") pod "27b96e48-77ff-4f71-a919-9f30814704a7" (UID: "27b96e48-77ff-4f71-a919-9f30814704a7"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.752089 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/27b96e48-77ff-4f71-a919-9f30814704a7-config" (OuterVolumeSpecName: "config") pod "27b96e48-77ff-4f71-a919-9f30814704a7" (UID: "27b96e48-77ff-4f71-a919-9f30814704a7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.755571 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27b96e48-77ff-4f71-a919-9f30814704a7-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "27b96e48-77ff-4f71-a919-9f30814704a7" (UID: "27b96e48-77ff-4f71-a919-9f30814704a7"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.755611 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27b96e48-77ff-4f71-a919-9f30814704a7-kube-api-access-ggczv" (OuterVolumeSpecName: "kube-api-access-ggczv") pod "27b96e48-77ff-4f71-a919-9f30814704a7" (UID: "27b96e48-77ff-4f71-a919-9f30814704a7"). InnerVolumeSpecName "kube-api-access-ggczv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.852193 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dt7hr\" (UniqueName: \"kubernetes.io/projected/c41022d8-8b60-453c-a928-2432a702d3e9-kube-api-access-dt7hr\") pod \"controller-manager-76f74d665d-wjh6p\" (UID: \"c41022d8-8b60-453c-a928-2432a702d3e9\") " pod="openshift-controller-manager/controller-manager-76f74d665d-wjh6p" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.852263 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c41022d8-8b60-453c-a928-2432a702d3e9-proxy-ca-bundles\") pod \"controller-manager-76f74d665d-wjh6p\" (UID: \"c41022d8-8b60-453c-a928-2432a702d3e9\") " pod="openshift-controller-manager/controller-manager-76f74d665d-wjh6p" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.852292 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c41022d8-8b60-453c-a928-2432a702d3e9-serving-cert\") pod \"controller-manager-76f74d665d-wjh6p\" (UID: \"c41022d8-8b60-453c-a928-2432a702d3e9\") " pod="openshift-controller-manager/controller-manager-76f74d665d-wjh6p" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.852391 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c41022d8-8b60-453c-a928-2432a702d3e9-client-ca\") pod \"controller-manager-76f74d665d-wjh6p\" (UID: \"c41022d8-8b60-453c-a928-2432a702d3e9\") " 
pod="openshift-controller-manager/controller-manager-76f74d665d-wjh6p" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.852462 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c41022d8-8b60-453c-a928-2432a702d3e9-config\") pod \"controller-manager-76f74d665d-wjh6p\" (UID: \"c41022d8-8b60-453c-a928-2432a702d3e9\") " pod="openshift-controller-manager/controller-manager-76f74d665d-wjh6p" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.852517 5048 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/27b96e48-77ff-4f71-a919-9f30814704a7-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.852537 5048 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/27b96e48-77ff-4f71-a919-9f30814704a7-client-ca\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.852551 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ggczv\" (UniqueName: \"kubernetes.io/projected/27b96e48-77ff-4f71-a919-9f30814704a7-kube-api-access-ggczv\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.852564 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27b96e48-77ff-4f71-a919-9f30814704a7-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.852575 5048 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/27b96e48-77ff-4f71-a919-9f30814704a7-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.901843 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-7f58797b5d-hrdj7"] Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.916765 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.953773 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c41022d8-8b60-453c-a928-2432a702d3e9-client-ca\") pod \"controller-manager-76f74d665d-wjh6p\" (UID: \"c41022d8-8b60-453c-a928-2432a702d3e9\") " pod="openshift-controller-manager/controller-manager-76f74d665d-wjh6p" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.954201 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c41022d8-8b60-453c-a928-2432a702d3e9-config\") pod \"controller-manager-76f74d665d-wjh6p\" (UID: \"c41022d8-8b60-453c-a928-2432a702d3e9\") " pod="openshift-controller-manager/controller-manager-76f74d665d-wjh6p" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.954242 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dt7hr\" (UniqueName: \"kubernetes.io/projected/c41022d8-8b60-453c-a928-2432a702d3e9-kube-api-access-dt7hr\") pod \"controller-manager-76f74d665d-wjh6p\" (UID: \"c41022d8-8b60-453c-a928-2432a702d3e9\") " pod="openshift-controller-manager/controller-manager-76f74d665d-wjh6p" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.954267 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c41022d8-8b60-453c-a928-2432a702d3e9-proxy-ca-bundles\") pod \"controller-manager-76f74d665d-wjh6p\" (UID: \"c41022d8-8b60-453c-a928-2432a702d3e9\") " pod="openshift-controller-manager/controller-manager-76f74d665d-wjh6p" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.954284 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c41022d8-8b60-453c-a928-2432a702d3e9-serving-cert\") pod \"controller-manager-76f74d665d-wjh6p\" (UID: \"c41022d8-8b60-453c-a928-2432a702d3e9\") " pod="openshift-controller-manager/controller-manager-76f74d665d-wjh6p" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.957955 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c41022d8-8b60-453c-a928-2432a702d3e9-serving-cert\") pod \"controller-manager-76f74d665d-wjh6p\" (UID: \"c41022d8-8b60-453c-a928-2432a702d3e9\") " pod="openshift-controller-manager/controller-manager-76f74d665d-wjh6p" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.959248 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c41022d8-8b60-453c-a928-2432a702d3e9-config\") pod \"controller-manager-76f74d665d-wjh6p\" (UID: \"c41022d8-8b60-453c-a928-2432a702d3e9\") " pod="openshift-controller-manager/controller-manager-76f74d665d-wjh6p" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.960204 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c41022d8-8b60-453c-a928-2432a702d3e9-proxy-ca-bundles\") pod \"controller-manager-76f74d665d-wjh6p\" (UID: \"c41022d8-8b60-453c-a928-2432a702d3e9\") " pod="openshift-controller-manager/controller-manager-76f74d665d-wjh6p" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.960733 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c41022d8-8b60-453c-a928-2432a702d3e9-client-ca\") pod \"controller-manager-76f74d665d-wjh6p\" (UID: \"c41022d8-8b60-453c-a928-2432a702d3e9\") " pod="openshift-controller-manager/controller-manager-76f74d665d-wjh6p" Dec 13 06:34:43 crc kubenswrapper[5048]: I1213 06:34:43.974700 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dt7hr\" (UniqueName: \"kubernetes.io/projected/c41022d8-8b60-453c-a928-2432a702d3e9-kube-api-access-dt7hr\") pod \"controller-manager-76f74d665d-wjh6p\" (UID: \"c41022d8-8b60-453c-a928-2432a702d3e9\") " pod="openshift-controller-manager/controller-manager-76f74d665d-wjh6p" Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.024043 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.024200 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.056166 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-snkhn\" (UniqueName: \"kubernetes.io/projected/8c1ef8c9-54d7-4182-bd65-cdbf97bb6284-kube-api-access-snkhn\") pod \"8c1ef8c9-54d7-4182-bd65-cdbf97bb6284\" (UID: \"8c1ef8c9-54d7-4182-bd65-cdbf97bb6284\") " Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.056506 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8c1ef8c9-54d7-4182-bd65-cdbf97bb6284-client-ca\") pod \"8c1ef8c9-54d7-4182-bd65-cdbf97bb6284\" (UID: \"8c1ef8c9-54d7-4182-bd65-cdbf97bb6284\") " Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.056617 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8c1ef8c9-54d7-4182-bd65-cdbf97bb6284-serving-cert\") pod \"8c1ef8c9-54d7-4182-bd65-cdbf97bb6284\" (UID: \"8c1ef8c9-54d7-4182-bd65-cdbf97bb6284\") " Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.056656 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8c1ef8c9-54d7-4182-bd65-cdbf97bb6284-config\") pod \"8c1ef8c9-54d7-4182-bd65-cdbf97bb6284\" (UID: \"8c1ef8c9-54d7-4182-bd65-cdbf97bb6284\") " Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.057768 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8c1ef8c9-54d7-4182-bd65-cdbf97bb6284-client-ca" (OuterVolumeSpecName: "client-ca") pod "8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" (UID: "8c1ef8c9-54d7-4182-bd65-cdbf97bb6284"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.058094 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8c1ef8c9-54d7-4182-bd65-cdbf97bb6284-config" (OuterVolumeSpecName: "config") pod "8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" (UID: "8c1ef8c9-54d7-4182-bd65-cdbf97bb6284"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.058999 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-76f74d665d-wjh6p" Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.059702 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8c1ef8c9-54d7-4182-bd65-cdbf97bb6284-kube-api-access-snkhn" (OuterVolumeSpecName: "kube-api-access-snkhn") pod "8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" (UID: "8c1ef8c9-54d7-4182-bd65-cdbf97bb6284"). InnerVolumeSpecName "kube-api-access-snkhn". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.059879 5048 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8c1ef8c9-54d7-4182-bd65-cdbf97bb6284-client-ca\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.059902 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8c1ef8c9-54d7-4182-bd65-cdbf97bb6284-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.059920 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-snkhn\" (UniqueName: \"kubernetes.io/projected/8c1ef8c9-54d7-4182-bd65-cdbf97bb6284-kube-api-access-snkhn\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.062162 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c1ef8c9-54d7-4182-bd65-cdbf97bb6284-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" (UID: "8c1ef8c9-54d7-4182-bd65-cdbf97bb6284"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.161335 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.161865 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.161938 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.161994 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.162014 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.162325 5048 reconciler_common.go:293] 
"Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8c1ef8c9-54d7-4182-bd65-cdbf97bb6284-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.161694 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.162404 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.162464 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.163505 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.170954 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.241221 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-76f74d665d-wjh6p"] Dec 13 06:34:44 crc kubenswrapper[5048]: W1213 06:34:44.245045 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc41022d8_8b60_453c_a928_2432a702d3e9.slice/crio-67d64f214e9aac8819fd2d479ad5215e48f9b7bc17b9f6aa420af38f0cf1fecb WatchSource:0}: Error finding container 67d64f214e9aac8819fd2d479ad5215e48f9b7bc17b9f6aa420af38f0cf1fecb: Status 404 returned error can't find the container with id 67d64f214e9aac8819fd2d479ad5215e48f9b7bc17b9f6aa420af38f0cf1fecb Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.263646 5048 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.263678 5048 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.263689 5048 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.263702 5048 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.263713 5048 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.382207 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5d89566846-prjbf" event={"ID":"27b96e48-77ff-4f71-a919-9f30814704a7","Type":"ContainerDied","Data":"4f290f59ab2e92c736faa6bf051d981a5ac6eb7442146807cedf96600695bf5a"} Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.382260 5048 scope.go:117] "RemoveContainer" containerID="f6082ba2f836efd335828d364876eb245329d7d38146be8961ecd24c33a1acbb" Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.382261 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5d89566846-prjbf" Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.384170 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-76f74d665d-wjh6p" event={"ID":"c41022d8-8b60-453c-a928-2432a702d3e9","Type":"ContainerStarted","Data":"67d64f214e9aac8819fd2d479ad5215e48f9b7bc17b9f6aa420af38f0cf1fecb"} Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.385668 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7" event={"ID":"41b23dfe-9cd0-43e2-98ad-e52eb61bc362","Type":"ContainerStarted","Data":"6fc61e91301cd974aab48d41165f34945d93e61a7b8d9510a62d8c1cccd5f9f3"} Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.392174 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.392323 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.393977 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" event={"ID":"8c1ef8c9-54d7-4182-bd65-cdbf97bb6284","Type":"ContainerDied","Data":"cf473a60a9b3ac2739bdb4ff8fbd3a8dd4f6ce06b35cd790a919dd87e58893fc"} Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.394031 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d" Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.432463 5048 scope.go:117] "RemoveContainer" containerID="98b93aef31307e077f3cd0616fadc36c2475728e96423a52f5b253b522a06c94" Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.454976 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-5d89566846-prjbf"] Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.458737 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-5d89566846-prjbf"] Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.461947 5048 scope.go:117] "RemoveContainer" containerID="c2c1ed6442aea77d542f2e2c81ac18e1c0bfef3d208cb59d4043fa0b12360374" Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.466059 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d"] Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.468871 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-74dc84cd6c-9769d"] Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.577505 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="27b96e48-77ff-4f71-a919-9f30814704a7" path="/var/lib/kubelet/pods/27b96e48-77ff-4f71-a919-9f30814704a7/volumes" Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.578037 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" path="/var/lib/kubelet/pods/8c1ef8c9-54d7-4182-bd65-cdbf97bb6284/volumes" Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.578370 5048 kubelet_volumes.go:163] "Cleaned up 
orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.578588 5048 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="" Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.588898 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.588928 5048 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="589de657-cc20-48e6-9ad3-6409eddee075" Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.589044 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Dec 13 06:34:44 crc kubenswrapper[5048]: I1213 06:34:44.589082 5048 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="589de657-cc20-48e6-9ad3-6409eddee075" Dec 13 06:34:45 crc kubenswrapper[5048]: I1213 06:34:45.921732 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6b5c687b9c-xbwgh"] Dec 13 06:34:45 crc kubenswrapper[5048]: E1213 06:34:45.921934 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" containerName="route-controller-manager" Dec 13 06:34:45 crc kubenswrapper[5048]: I1213 06:34:45.921946 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" containerName="route-controller-manager" Dec 13 06:34:45 crc kubenswrapper[5048]: I1213 06:34:45.922111 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c1ef8c9-54d7-4182-bd65-cdbf97bb6284" containerName="route-controller-manager" Dec 13 06:34:45 crc kubenswrapper[5048]: I1213 06:34:45.922558 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-xbwgh" Dec 13 06:34:45 crc kubenswrapper[5048]: I1213 06:34:45.924782 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Dec 13 06:34:45 crc kubenswrapper[5048]: I1213 06:34:45.924955 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Dec 13 06:34:45 crc kubenswrapper[5048]: I1213 06:34:45.925125 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Dec 13 06:34:45 crc kubenswrapper[5048]: I1213 06:34:45.925266 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Dec 13 06:34:45 crc kubenswrapper[5048]: I1213 06:34:45.925277 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Dec 13 06:34:45 crc kubenswrapper[5048]: I1213 06:34:45.925409 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Dec 13 06:34:45 crc kubenswrapper[5048]: I1213 06:34:45.932575 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6b5c687b9c-xbwgh"] Dec 13 06:34:46 crc kubenswrapper[5048]: I1213 06:34:46.090513 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c8p62\" (UniqueName: \"kubernetes.io/projected/4c502429-c5ef-4960-96eb-c1eb5b0a84d9-kube-api-access-c8p62\") pod \"route-controller-manager-6b5c687b9c-xbwgh\" (UID: \"4c502429-c5ef-4960-96eb-c1eb5b0a84d9\") " pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-xbwgh" Dec 13 06:34:46 crc kubenswrapper[5048]: I1213 06:34:46.090597 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4c502429-c5ef-4960-96eb-c1eb5b0a84d9-client-ca\") pod \"route-controller-manager-6b5c687b9c-xbwgh\" (UID: \"4c502429-c5ef-4960-96eb-c1eb5b0a84d9\") " pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-xbwgh" Dec 13 06:34:46 crc kubenswrapper[5048]: I1213 06:34:46.090634 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c502429-c5ef-4960-96eb-c1eb5b0a84d9-config\") pod \"route-controller-manager-6b5c687b9c-xbwgh\" (UID: \"4c502429-c5ef-4960-96eb-c1eb5b0a84d9\") " pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-xbwgh" Dec 13 06:34:46 crc kubenswrapper[5048]: I1213 06:34:46.090691 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4c502429-c5ef-4960-96eb-c1eb5b0a84d9-serving-cert\") pod \"route-controller-manager-6b5c687b9c-xbwgh\" (UID: \"4c502429-c5ef-4960-96eb-c1eb5b0a84d9\") " pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-xbwgh" Dec 13 06:34:46 crc kubenswrapper[5048]: I1213 06:34:46.192095 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c8p62\" (UniqueName: \"kubernetes.io/projected/4c502429-c5ef-4960-96eb-c1eb5b0a84d9-kube-api-access-c8p62\") pod 
\"route-controller-manager-6b5c687b9c-xbwgh\" (UID: \"4c502429-c5ef-4960-96eb-c1eb5b0a84d9\") " pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-xbwgh" Dec 13 06:34:46 crc kubenswrapper[5048]: I1213 06:34:46.192204 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4c502429-c5ef-4960-96eb-c1eb5b0a84d9-client-ca\") pod \"route-controller-manager-6b5c687b9c-xbwgh\" (UID: \"4c502429-c5ef-4960-96eb-c1eb5b0a84d9\") " pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-xbwgh" Dec 13 06:34:46 crc kubenswrapper[5048]: I1213 06:34:46.192242 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c502429-c5ef-4960-96eb-c1eb5b0a84d9-config\") pod \"route-controller-manager-6b5c687b9c-xbwgh\" (UID: \"4c502429-c5ef-4960-96eb-c1eb5b0a84d9\") " pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-xbwgh" Dec 13 06:34:46 crc kubenswrapper[5048]: I1213 06:34:46.192279 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4c502429-c5ef-4960-96eb-c1eb5b0a84d9-serving-cert\") pod \"route-controller-manager-6b5c687b9c-xbwgh\" (UID: \"4c502429-c5ef-4960-96eb-c1eb5b0a84d9\") " pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-xbwgh" Dec 13 06:34:46 crc kubenswrapper[5048]: I1213 06:34:46.193721 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4c502429-c5ef-4960-96eb-c1eb5b0a84d9-client-ca\") pod \"route-controller-manager-6b5c687b9c-xbwgh\" (UID: \"4c502429-c5ef-4960-96eb-c1eb5b0a84d9\") " pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-xbwgh" Dec 13 06:34:46 crc kubenswrapper[5048]: I1213 06:34:46.194955 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c502429-c5ef-4960-96eb-c1eb5b0a84d9-config\") pod \"route-controller-manager-6b5c687b9c-xbwgh\" (UID: \"4c502429-c5ef-4960-96eb-c1eb5b0a84d9\") " pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-xbwgh" Dec 13 06:34:46 crc kubenswrapper[5048]: I1213 06:34:46.199560 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4c502429-c5ef-4960-96eb-c1eb5b0a84d9-serving-cert\") pod \"route-controller-manager-6b5c687b9c-xbwgh\" (UID: \"4c502429-c5ef-4960-96eb-c1eb5b0a84d9\") " pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-xbwgh" Dec 13 06:34:46 crc kubenswrapper[5048]: I1213 06:34:46.209251 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c8p62\" (UniqueName: \"kubernetes.io/projected/4c502429-c5ef-4960-96eb-c1eb5b0a84d9-kube-api-access-c8p62\") pod \"route-controller-manager-6b5c687b9c-xbwgh\" (UID: \"4c502429-c5ef-4960-96eb-c1eb5b0a84d9\") " pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-xbwgh" Dec 13 06:34:46 crc kubenswrapper[5048]: I1213 06:34:46.251409 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-xbwgh" Dec 13 06:34:46 crc kubenswrapper[5048]: I1213 06:34:46.454115 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6b5c687b9c-xbwgh"] Dec 13 06:34:47 crc kubenswrapper[5048]: I1213 06:34:47.421119 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7" event={"ID":"41b23dfe-9cd0-43e2-98ad-e52eb61bc362","Type":"ContainerStarted","Data":"5277dbb0469bdc0f19520d298f5d88f6af7319cbb12e9070b279022f6e173fa6"} Dec 13 06:34:47 crc kubenswrapper[5048]: I1213 06:34:47.423403 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-76f74d665d-wjh6p" event={"ID":"c41022d8-8b60-453c-a928-2432a702d3e9","Type":"ContainerStarted","Data":"c36e0f1f7decd4beb05cbe9a7a3cb322b0be0c27e1aa338398d8e6117be53b0f"} Dec 13 06:34:47 crc kubenswrapper[5048]: I1213 06:34:47.424329 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-xbwgh" event={"ID":"4c502429-c5ef-4960-96eb-c1eb5b0a84d9","Type":"ContainerStarted","Data":"4400ec571408fd02735fefc70dcfc17e65741e62a06a2644f2f4fdd1f1477c87"} Dec 13 06:34:47 crc kubenswrapper[5048]: I1213 06:34:47.847275 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Dec 13 06:34:48 crc kubenswrapper[5048]: I1213 06:34:48.434481 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-xbwgh" event={"ID":"4c502429-c5ef-4960-96eb-c1eb5b0a84d9","Type":"ContainerStarted","Data":"463a6830bd27925df4c128df691f0cfbdcc89198859eaccf6bf0d15fc50e7d76"} Dec 13 06:34:48 crc kubenswrapper[5048]: I1213 06:34:48.435044 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-76f74d665d-wjh6p" Dec 13 06:34:48 crc kubenswrapper[5048]: I1213 06:34:48.442693 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-76f74d665d-wjh6p" Dec 13 06:34:48 crc kubenswrapper[5048]: I1213 06:34:48.454273 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-76f74d665d-wjh6p" podStartSLOduration=7.454249436 podStartE2EDuration="7.454249436s" podCreationTimestamp="2025-12-13 06:34:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:34:48.454198624 +0000 UTC m=+322.320793215" watchObservedRunningTime="2025-12-13 06:34:48.454249436 +0000 UTC m=+322.320844027" Dec 13 06:34:49 crc kubenswrapper[5048]: I1213 06:34:49.439580 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-xbwgh" Dec 13 06:34:49 crc kubenswrapper[5048]: I1213 06:34:49.439663 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7" Dec 13 06:34:49 crc kubenswrapper[5048]: I1213 06:34:49.446352 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7" Dec 13 06:34:49 crc kubenswrapper[5048]: I1213 06:34:49.448201 5048 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-xbwgh" Dec 13 06:34:49 crc kubenswrapper[5048]: I1213 06:34:49.467492 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-xbwgh" podStartSLOduration=8.467465631 podStartE2EDuration="8.467465631s" podCreationTimestamp="2025-12-13 06:34:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:34:49.464642578 +0000 UTC m=+323.331237179" watchObservedRunningTime="2025-12-13 06:34:49.467465631 +0000 UTC m=+323.334060242" Dec 13 06:34:49 crc kubenswrapper[5048]: I1213 06:34:49.494630 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-7f58797b5d-hrdj7" podStartSLOduration=69.49460803 podStartE2EDuration="1m9.49460803s" podCreationTimestamp="2025-12-13 06:33:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:34:49.494136066 +0000 UTC m=+323.360730697" watchObservedRunningTime="2025-12-13 06:34:49.49460803 +0000 UTC m=+323.361202631" Dec 13 06:34:51 crc kubenswrapper[5048]: I1213 06:34:51.566802 5048 scope.go:117] "RemoveContainer" containerID="3ad09671069fe23f98d2cd657e5b03c8e9347cf52e8533e300243ef22004b429" Dec 13 06:34:55 crc kubenswrapper[5048]: I1213 06:34:55.478465 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-zm7qx_60f5ae10-2f86-46f8-b613-f017b8753690/marketplace-operator/2.log" Dec 13 06:34:55 crc kubenswrapper[5048]: I1213 06:34:55.479512 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" event={"ID":"60f5ae10-2f86-46f8-b613-f017b8753690","Type":"ContainerStarted","Data":"13c574a3fb91db348eed60d03c898c96cc333b4592616c5871a3022f6fd1104b"} Dec 13 06:34:55 crc kubenswrapper[5048]: I1213 06:34:55.480996 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" Dec 13 06:34:55 crc kubenswrapper[5048]: I1213 06:34:55.487497 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" Dec 13 06:34:55 crc kubenswrapper[5048]: I1213 06:34:55.503544 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-zm7qx" podStartSLOduration=71.503525717 podStartE2EDuration="1m11.503525717s" podCreationTimestamp="2025-12-13 06:33:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:34:55.500508792 +0000 UTC m=+329.367103393" watchObservedRunningTime="2025-12-13 06:34:55.503525717 +0000 UTC m=+329.370120318" Dec 13 06:34:56 crc kubenswrapper[5048]: I1213 06:34:56.815732 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Dec 13 06:35:00 crc kubenswrapper[5048]: I1213 06:35:00.692844 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-g8j5s"] Dec 13 06:35:00 crc kubenswrapper[5048]: I1213 06:35:00.694152 5048 util.go:30] "No sandbox for pod 
can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-g8j5s" Dec 13 06:35:00 crc kubenswrapper[5048]: I1213 06:35:00.698173 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Dec 13 06:35:00 crc kubenswrapper[5048]: I1213 06:35:00.708960 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-g8j5s"] Dec 13 06:35:00 crc kubenswrapper[5048]: I1213 06:35:00.822911 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nzths\" (UniqueName: \"kubernetes.io/projected/07c47b88-4b41-47d9-ae48-feacb3431a47-kube-api-access-nzths\") pod \"certified-operators-g8j5s\" (UID: \"07c47b88-4b41-47d9-ae48-feacb3431a47\") " pod="openshift-marketplace/certified-operators-g8j5s" Dec 13 06:35:00 crc kubenswrapper[5048]: I1213 06:35:00.823457 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07c47b88-4b41-47d9-ae48-feacb3431a47-catalog-content\") pod \"certified-operators-g8j5s\" (UID: \"07c47b88-4b41-47d9-ae48-feacb3431a47\") " pod="openshift-marketplace/certified-operators-g8j5s" Dec 13 06:35:00 crc kubenswrapper[5048]: I1213 06:35:00.823514 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07c47b88-4b41-47d9-ae48-feacb3431a47-utilities\") pod \"certified-operators-g8j5s\" (UID: \"07c47b88-4b41-47d9-ae48-feacb3431a47\") " pod="openshift-marketplace/certified-operators-g8j5s" Dec 13 06:35:00 crc kubenswrapper[5048]: I1213 06:35:00.892073 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-ngg4l"] Dec 13 06:35:00 crc kubenswrapper[5048]: I1213 06:35:00.893337 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-ngg4l" Dec 13 06:35:00 crc kubenswrapper[5048]: I1213 06:35:00.897112 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Dec 13 06:35:00 crc kubenswrapper[5048]: I1213 06:35:00.906045 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ngg4l"] Dec 13 06:35:00 crc kubenswrapper[5048]: I1213 06:35:00.924657 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07c47b88-4b41-47d9-ae48-feacb3431a47-utilities\") pod \"certified-operators-g8j5s\" (UID: \"07c47b88-4b41-47d9-ae48-feacb3431a47\") " pod="openshift-marketplace/certified-operators-g8j5s" Dec 13 06:35:00 crc kubenswrapper[5048]: I1213 06:35:00.925040 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nzths\" (UniqueName: \"kubernetes.io/projected/07c47b88-4b41-47d9-ae48-feacb3431a47-kube-api-access-nzths\") pod \"certified-operators-g8j5s\" (UID: \"07c47b88-4b41-47d9-ae48-feacb3431a47\") " pod="openshift-marketplace/certified-operators-g8j5s" Dec 13 06:35:00 crc kubenswrapper[5048]: I1213 06:35:00.925106 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07c47b88-4b41-47d9-ae48-feacb3431a47-catalog-content\") pod \"certified-operators-g8j5s\" (UID: \"07c47b88-4b41-47d9-ae48-feacb3431a47\") " pod="openshift-marketplace/certified-operators-g8j5s" Dec 13 06:35:00 crc kubenswrapper[5048]: I1213 06:35:00.925176 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07c47b88-4b41-47d9-ae48-feacb3431a47-utilities\") pod \"certified-operators-g8j5s\" (UID: \"07c47b88-4b41-47d9-ae48-feacb3431a47\") " pod="openshift-marketplace/certified-operators-g8j5s" Dec 13 06:35:00 crc kubenswrapper[5048]: I1213 06:35:00.925512 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07c47b88-4b41-47d9-ae48-feacb3431a47-catalog-content\") pod \"certified-operators-g8j5s\" (UID: \"07c47b88-4b41-47d9-ae48-feacb3431a47\") " pod="openshift-marketplace/certified-operators-g8j5s" Dec 13 06:35:00 crc kubenswrapper[5048]: I1213 06:35:00.944535 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nzths\" (UniqueName: \"kubernetes.io/projected/07c47b88-4b41-47d9-ae48-feacb3431a47-kube-api-access-nzths\") pod \"certified-operators-g8j5s\" (UID: \"07c47b88-4b41-47d9-ae48-feacb3431a47\") " pod="openshift-marketplace/certified-operators-g8j5s" Dec 13 06:35:01 crc kubenswrapper[5048]: I1213 06:35:01.014006 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-g8j5s" Dec 13 06:35:01 crc kubenswrapper[5048]: I1213 06:35:01.025153 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-76f74d665d-wjh6p"] Dec 13 06:35:01 crc kubenswrapper[5048]: I1213 06:35:01.025374 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-76f74d665d-wjh6p" podUID="c41022d8-8b60-453c-a928-2432a702d3e9" containerName="controller-manager" containerID="cri-o://c36e0f1f7decd4beb05cbe9a7a3cb322b0be0c27e1aa338398d8e6117be53b0f" gracePeriod=30 Dec 13 06:35:01 crc kubenswrapper[5048]: I1213 06:35:01.025889 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a00d4f4-f561-491b-a236-7e46f411f58e-utilities\") pod \"community-operators-ngg4l\" (UID: \"2a00d4f4-f561-491b-a236-7e46f411f58e\") " pod="openshift-marketplace/community-operators-ngg4l" Dec 13 06:35:01 crc kubenswrapper[5048]: I1213 06:35:01.025925 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9jcvc\" (UniqueName: \"kubernetes.io/projected/2a00d4f4-f561-491b-a236-7e46f411f58e-kube-api-access-9jcvc\") pod \"community-operators-ngg4l\" (UID: \"2a00d4f4-f561-491b-a236-7e46f411f58e\") " pod="openshift-marketplace/community-operators-ngg4l" Dec 13 06:35:01 crc kubenswrapper[5048]: I1213 06:35:01.025954 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a00d4f4-f561-491b-a236-7e46f411f58e-catalog-content\") pod \"community-operators-ngg4l\" (UID: \"2a00d4f4-f561-491b-a236-7e46f411f58e\") " pod="openshift-marketplace/community-operators-ngg4l" Dec 13 06:35:01 crc kubenswrapper[5048]: I1213 06:35:01.047578 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6b5c687b9c-xbwgh"] Dec 13 06:35:01 crc kubenswrapper[5048]: I1213 06:35:01.047788 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-xbwgh" podUID="4c502429-c5ef-4960-96eb-c1eb5b0a84d9" containerName="route-controller-manager" containerID="cri-o://463a6830bd27925df4c128df691f0cfbdcc89198859eaccf6bf0d15fc50e7d76" gracePeriod=30 Dec 13 06:35:01 crc kubenswrapper[5048]: I1213 06:35:01.126924 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a00d4f4-f561-491b-a236-7e46f411f58e-catalog-content\") pod \"community-operators-ngg4l\" (UID: \"2a00d4f4-f561-491b-a236-7e46f411f58e\") " pod="openshift-marketplace/community-operators-ngg4l" Dec 13 06:35:01 crc kubenswrapper[5048]: I1213 06:35:01.127297 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a00d4f4-f561-491b-a236-7e46f411f58e-utilities\") pod \"community-operators-ngg4l\" (UID: \"2a00d4f4-f561-491b-a236-7e46f411f58e\") " pod="openshift-marketplace/community-operators-ngg4l" Dec 13 06:35:01 crc kubenswrapper[5048]: I1213 06:35:01.127320 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9jcvc\" (UniqueName: 
\"kubernetes.io/projected/2a00d4f4-f561-491b-a236-7e46f411f58e-kube-api-access-9jcvc\") pod \"community-operators-ngg4l\" (UID: \"2a00d4f4-f561-491b-a236-7e46f411f58e\") " pod="openshift-marketplace/community-operators-ngg4l" Dec 13 06:35:01 crc kubenswrapper[5048]: I1213 06:35:01.127356 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a00d4f4-f561-491b-a236-7e46f411f58e-catalog-content\") pod \"community-operators-ngg4l\" (UID: \"2a00d4f4-f561-491b-a236-7e46f411f58e\") " pod="openshift-marketplace/community-operators-ngg4l" Dec 13 06:35:01 crc kubenswrapper[5048]: I1213 06:35:01.127771 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a00d4f4-f561-491b-a236-7e46f411f58e-utilities\") pod \"community-operators-ngg4l\" (UID: \"2a00d4f4-f561-491b-a236-7e46f411f58e\") " pod="openshift-marketplace/community-operators-ngg4l" Dec 13 06:35:01 crc kubenswrapper[5048]: I1213 06:35:01.154685 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9jcvc\" (UniqueName: \"kubernetes.io/projected/2a00d4f4-f561-491b-a236-7e46f411f58e-kube-api-access-9jcvc\") pod \"community-operators-ngg4l\" (UID: \"2a00d4f4-f561-491b-a236-7e46f411f58e\") " pod="openshift-marketplace/community-operators-ngg4l" Dec 13 06:35:01 crc kubenswrapper[5048]: I1213 06:35:01.210995 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ngg4l" Dec 13 06:35:01 crc kubenswrapper[5048]: I1213 06:35:01.430294 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-g8j5s"] Dec 13 06:35:01 crc kubenswrapper[5048]: W1213 06:35:01.432923 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod07c47b88_4b41_47d9_ae48_feacb3431a47.slice/crio-f0f2a572fc1e86a95e508f98eef9f247764c77e6538780b0cabe650f4fccea4c WatchSource:0}: Error finding container f0f2a572fc1e86a95e508f98eef9f247764c77e6538780b0cabe650f4fccea4c: Status 404 returned error can't find the container with id f0f2a572fc1e86a95e508f98eef9f247764c77e6538780b0cabe650f4fccea4c Dec 13 06:35:01 crc kubenswrapper[5048]: I1213 06:35:01.513286 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g8j5s" event={"ID":"07c47b88-4b41-47d9-ae48-feacb3431a47","Type":"ContainerStarted","Data":"f0f2a572fc1e86a95e508f98eef9f247764c77e6538780b0cabe650f4fccea4c"} Dec 13 06:35:01 crc kubenswrapper[5048]: I1213 06:35:01.600967 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ngg4l"] Dec 13 06:35:01 crc kubenswrapper[5048]: W1213 06:35:01.609604 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2a00d4f4_f561_491b_a236_7e46f411f58e.slice/crio-3f176b5c016e7053f81fe6b7c6064dce617273697f2e70deb3e75617261768df WatchSource:0}: Error finding container 3f176b5c016e7053f81fe6b7c6064dce617273697f2e70deb3e75617261768df: Status 404 returned error can't find the container with id 3f176b5c016e7053f81fe6b7c6064dce617273697f2e70deb3e75617261768df Dec 13 06:35:02 crc kubenswrapper[5048]: I1213 06:35:02.519982 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g8j5s" 
event={"ID":"07c47b88-4b41-47d9-ae48-feacb3431a47","Type":"ContainerStarted","Data":"4c1ca6ae2267d5553735a03a82e3ec9098f846fb0f4965bc82b53c34e622f164"} Dec 13 06:35:02 crc kubenswrapper[5048]: I1213 06:35:02.522489 5048 generic.go:334] "Generic (PLEG): container finished" podID="c41022d8-8b60-453c-a928-2432a702d3e9" containerID="c36e0f1f7decd4beb05cbe9a7a3cb322b0be0c27e1aa338398d8e6117be53b0f" exitCode=0 Dec 13 06:35:02 crc kubenswrapper[5048]: I1213 06:35:02.522572 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-76f74d665d-wjh6p" event={"ID":"c41022d8-8b60-453c-a928-2432a702d3e9","Type":"ContainerDied","Data":"c36e0f1f7decd4beb05cbe9a7a3cb322b0be0c27e1aa338398d8e6117be53b0f"} Dec 13 06:35:02 crc kubenswrapper[5048]: I1213 06:35:02.524946 5048 generic.go:334] "Generic (PLEG): container finished" podID="4c502429-c5ef-4960-96eb-c1eb5b0a84d9" containerID="463a6830bd27925df4c128df691f0cfbdcc89198859eaccf6bf0d15fc50e7d76" exitCode=0 Dec 13 06:35:02 crc kubenswrapper[5048]: I1213 06:35:02.525045 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-xbwgh" event={"ID":"4c502429-c5ef-4960-96eb-c1eb5b0a84d9","Type":"ContainerDied","Data":"463a6830bd27925df4c128df691f0cfbdcc89198859eaccf6bf0d15fc50e7d76"} Dec 13 06:35:02 crc kubenswrapper[5048]: I1213 06:35:02.527177 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ngg4l" event={"ID":"2a00d4f4-f561-491b-a236-7e46f411f58e","Type":"ContainerStarted","Data":"3c70defbccbd1be1ff38854122eb900c054c62ac49307698f6deea3bc622ac66"} Dec 13 06:35:02 crc kubenswrapper[5048]: I1213 06:35:02.527235 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ngg4l" event={"ID":"2a00d4f4-f561-491b-a236-7e46f411f58e","Type":"ContainerStarted","Data":"3f176b5c016e7053f81fe6b7c6064dce617273697f2e70deb3e75617261768df"} Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.090031 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-bgnnj"] Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.091542 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bgnnj" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.093504 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.155615 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bgnnj"] Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.253342 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0cdd6ca2-d077-4ea1-8dda-6fadccca087d-catalog-content\") pod \"redhat-marketplace-bgnnj\" (UID: \"0cdd6ca2-d077-4ea1-8dda-6fadccca087d\") " pod="openshift-marketplace/redhat-marketplace-bgnnj" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.253482 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0cdd6ca2-d077-4ea1-8dda-6fadccca087d-utilities\") pod \"redhat-marketplace-bgnnj\" (UID: \"0cdd6ca2-d077-4ea1-8dda-6fadccca087d\") " pod="openshift-marketplace/redhat-marketplace-bgnnj" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.253511 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hnkc\" (UniqueName: \"kubernetes.io/projected/0cdd6ca2-d077-4ea1-8dda-6fadccca087d-kube-api-access-5hnkc\") pod \"redhat-marketplace-bgnnj\" (UID: \"0cdd6ca2-d077-4ea1-8dda-6fadccca087d\") " pod="openshift-marketplace/redhat-marketplace-bgnnj" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.275103 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" podUID="36ee331b-baa0-42ac-9bd3-7c52253814e1" containerName="registry" containerID="cri-o://01798a848041919b152c6308fa93ba8571978c608dfe2fa259c63306f76f0669" gracePeriod=30 Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.288332 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-dqz4m"] Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.291711 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dqz4m" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.294157 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.325404 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dqz4m"] Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.354417 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0cdd6ca2-d077-4ea1-8dda-6fadccca087d-utilities\") pod \"redhat-marketplace-bgnnj\" (UID: \"0cdd6ca2-d077-4ea1-8dda-6fadccca087d\") " pod="openshift-marketplace/redhat-marketplace-bgnnj" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.354877 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5hnkc\" (UniqueName: \"kubernetes.io/projected/0cdd6ca2-d077-4ea1-8dda-6fadccca087d-kube-api-access-5hnkc\") pod \"redhat-marketplace-bgnnj\" (UID: \"0cdd6ca2-d077-4ea1-8dda-6fadccca087d\") " pod="openshift-marketplace/redhat-marketplace-bgnnj" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.354946 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0cdd6ca2-d077-4ea1-8dda-6fadccca087d-catalog-content\") pod \"redhat-marketplace-bgnnj\" (UID: \"0cdd6ca2-d077-4ea1-8dda-6fadccca087d\") " pod="openshift-marketplace/redhat-marketplace-bgnnj" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.355635 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0cdd6ca2-d077-4ea1-8dda-6fadccca087d-catalog-content\") pod \"redhat-marketplace-bgnnj\" (UID: \"0cdd6ca2-d077-4ea1-8dda-6fadccca087d\") " pod="openshift-marketplace/redhat-marketplace-bgnnj" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.355779 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0cdd6ca2-d077-4ea1-8dda-6fadccca087d-utilities\") pod \"redhat-marketplace-bgnnj\" (UID: \"0cdd6ca2-d077-4ea1-8dda-6fadccca087d\") " pod="openshift-marketplace/redhat-marketplace-bgnnj" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.379385 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hnkc\" (UniqueName: \"kubernetes.io/projected/0cdd6ca2-d077-4ea1-8dda-6fadccca087d-kube-api-access-5hnkc\") pod \"redhat-marketplace-bgnnj\" (UID: \"0cdd6ca2-d077-4ea1-8dda-6fadccca087d\") " pod="openshift-marketplace/redhat-marketplace-bgnnj" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.455919 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/152d7097-80ce-42d3-b0a8-45e04a295b3d-utilities\") pod \"redhat-operators-dqz4m\" (UID: \"152d7097-80ce-42d3-b0a8-45e04a295b3d\") " pod="openshift-marketplace/redhat-operators-dqz4m" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.456001 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/152d7097-80ce-42d3-b0a8-45e04a295b3d-catalog-content\") pod \"redhat-operators-dqz4m\" (UID: \"152d7097-80ce-42d3-b0a8-45e04a295b3d\") " 
pod="openshift-marketplace/redhat-operators-dqz4m" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.456020 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7844z\" (UniqueName: \"kubernetes.io/projected/152d7097-80ce-42d3-b0a8-45e04a295b3d-kube-api-access-7844z\") pod \"redhat-operators-dqz4m\" (UID: \"152d7097-80ce-42d3-b0a8-45e04a295b3d\") " pod="openshift-marketplace/redhat-operators-dqz4m" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.461275 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bgnnj" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.535735 5048 generic.go:334] "Generic (PLEG): container finished" podID="2a00d4f4-f561-491b-a236-7e46f411f58e" containerID="3c70defbccbd1be1ff38854122eb900c054c62ac49307698f6deea3bc622ac66" exitCode=0 Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.535792 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ngg4l" event={"ID":"2a00d4f4-f561-491b-a236-7e46f411f58e","Type":"ContainerDied","Data":"3c70defbccbd1be1ff38854122eb900c054c62ac49307698f6deea3bc622ac66"} Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.542379 5048 generic.go:334] "Generic (PLEG): container finished" podID="07c47b88-4b41-47d9-ae48-feacb3431a47" containerID="4c1ca6ae2267d5553735a03a82e3ec9098f846fb0f4965bc82b53c34e622f164" exitCode=0 Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.542411 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g8j5s" event={"ID":"07c47b88-4b41-47d9-ae48-feacb3431a47","Type":"ContainerDied","Data":"4c1ca6ae2267d5553735a03a82e3ec9098f846fb0f4965bc82b53c34e622f164"} Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.559534 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/152d7097-80ce-42d3-b0a8-45e04a295b3d-utilities\") pod \"redhat-operators-dqz4m\" (UID: \"152d7097-80ce-42d3-b0a8-45e04a295b3d\") " pod="openshift-marketplace/redhat-operators-dqz4m" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.559592 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/152d7097-80ce-42d3-b0a8-45e04a295b3d-catalog-content\") pod \"redhat-operators-dqz4m\" (UID: \"152d7097-80ce-42d3-b0a8-45e04a295b3d\") " pod="openshift-marketplace/redhat-operators-dqz4m" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.559615 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7844z\" (UniqueName: \"kubernetes.io/projected/152d7097-80ce-42d3-b0a8-45e04a295b3d-kube-api-access-7844z\") pod \"redhat-operators-dqz4m\" (UID: \"152d7097-80ce-42d3-b0a8-45e04a295b3d\") " pod="openshift-marketplace/redhat-operators-dqz4m" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.560498 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/152d7097-80ce-42d3-b0a8-45e04a295b3d-utilities\") pod \"redhat-operators-dqz4m\" (UID: \"152d7097-80ce-42d3-b0a8-45e04a295b3d\") " pod="openshift-marketplace/redhat-operators-dqz4m" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.560781 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/152d7097-80ce-42d3-b0a8-45e04a295b3d-catalog-content\") pod \"redhat-operators-dqz4m\" (UID: \"152d7097-80ce-42d3-b0a8-45e04a295b3d\") " pod="openshift-marketplace/redhat-operators-dqz4m" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.583879 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-xbwgh" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.587857 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7844z\" (UniqueName: \"kubernetes.io/projected/152d7097-80ce-42d3-b0a8-45e04a295b3d-kube-api-access-7844z\") pod \"redhat-operators-dqz4m\" (UID: \"152d7097-80ce-42d3-b0a8-45e04a295b3d\") " pod="openshift-marketplace/redhat-operators-dqz4m" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.590841 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-76f74d665d-wjh6p" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.610395 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5f665f7bd4-h44gg"] Dec 13 06:35:03 crc kubenswrapper[5048]: E1213 06:35:03.610629 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c41022d8-8b60-453c-a928-2432a702d3e9" containerName="controller-manager" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.610641 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="c41022d8-8b60-453c-a928-2432a702d3e9" containerName="controller-manager" Dec 13 06:35:03 crc kubenswrapper[5048]: E1213 06:35:03.610656 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c502429-c5ef-4960-96eb-c1eb5b0a84d9" containerName="route-controller-manager" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.610662 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c502429-c5ef-4960-96eb-c1eb5b0a84d9" containerName="route-controller-manager" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.610748 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="c41022d8-8b60-453c-a928-2432a702d3e9" containerName="controller-manager" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.610759 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c502429-c5ef-4960-96eb-c1eb5b0a84d9" containerName="route-controller-manager" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.611202 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5f665f7bd4-h44gg" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.616980 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dqz4m" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.642809 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5f665f7bd4-h44gg"] Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.761146 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c41022d8-8b60-453c-a928-2432a702d3e9-config\") pod \"c41022d8-8b60-453c-a928-2432a702d3e9\" (UID: \"c41022d8-8b60-453c-a928-2432a702d3e9\") " Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.761223 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dt7hr\" (UniqueName: \"kubernetes.io/projected/c41022d8-8b60-453c-a928-2432a702d3e9-kube-api-access-dt7hr\") pod \"c41022d8-8b60-453c-a928-2432a702d3e9\" (UID: \"c41022d8-8b60-453c-a928-2432a702d3e9\") " Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.761243 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c41022d8-8b60-453c-a928-2432a702d3e9-proxy-ca-bundles\") pod \"c41022d8-8b60-453c-a928-2432a702d3e9\" (UID: \"c41022d8-8b60-453c-a928-2432a702d3e9\") " Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.761267 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4c502429-c5ef-4960-96eb-c1eb5b0a84d9-client-ca\") pod \"4c502429-c5ef-4960-96eb-c1eb5b0a84d9\" (UID: \"4c502429-c5ef-4960-96eb-c1eb5b0a84d9\") " Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.761294 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c502429-c5ef-4960-96eb-c1eb5b0a84d9-config\") pod \"4c502429-c5ef-4960-96eb-c1eb5b0a84d9\" (UID: \"4c502429-c5ef-4960-96eb-c1eb5b0a84d9\") " Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.761321 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4c502429-c5ef-4960-96eb-c1eb5b0a84d9-serving-cert\") pod \"4c502429-c5ef-4960-96eb-c1eb5b0a84d9\" (UID: \"4c502429-c5ef-4960-96eb-c1eb5b0a84d9\") " Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.761370 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c41022d8-8b60-453c-a928-2432a702d3e9-client-ca\") pod \"c41022d8-8b60-453c-a928-2432a702d3e9\" (UID: \"c41022d8-8b60-453c-a928-2432a702d3e9\") " Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.761387 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c41022d8-8b60-453c-a928-2432a702d3e9-serving-cert\") pod \"c41022d8-8b60-453c-a928-2432a702d3e9\" (UID: \"c41022d8-8b60-453c-a928-2432a702d3e9\") " Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.761417 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c8p62\" (UniqueName: \"kubernetes.io/projected/4c502429-c5ef-4960-96eb-c1eb5b0a84d9-kube-api-access-c8p62\") pod \"4c502429-c5ef-4960-96eb-c1eb5b0a84d9\" (UID: \"4c502429-c5ef-4960-96eb-c1eb5b0a84d9\") " Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.761562 5048 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-22hnm\" (UniqueName: \"kubernetes.io/projected/f84bf265-d91a-4c4f-85b9-9f3e4475b08a-kube-api-access-22hnm\") pod \"route-controller-manager-5f665f7bd4-h44gg\" (UID: \"f84bf265-d91a-4c4f-85b9-9f3e4475b08a\") " pod="openshift-route-controller-manager/route-controller-manager-5f665f7bd4-h44gg" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.761588 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f84bf265-d91a-4c4f-85b9-9f3e4475b08a-client-ca\") pod \"route-controller-manager-5f665f7bd4-h44gg\" (UID: \"f84bf265-d91a-4c4f-85b9-9f3e4475b08a\") " pod="openshift-route-controller-manager/route-controller-manager-5f665f7bd4-h44gg" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.761609 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f84bf265-d91a-4c4f-85b9-9f3e4475b08a-config\") pod \"route-controller-manager-5f665f7bd4-h44gg\" (UID: \"f84bf265-d91a-4c4f-85b9-9f3e4475b08a\") " pod="openshift-route-controller-manager/route-controller-manager-5f665f7bd4-h44gg" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.761679 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f84bf265-d91a-4c4f-85b9-9f3e4475b08a-serving-cert\") pod \"route-controller-manager-5f665f7bd4-h44gg\" (UID: \"f84bf265-d91a-4c4f-85b9-9f3e4475b08a\") " pod="openshift-route-controller-manager/route-controller-manager-5f665f7bd4-h44gg" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.763022 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c41022d8-8b60-453c-a928-2432a702d3e9-client-ca" (OuterVolumeSpecName: "client-ca") pod "c41022d8-8b60-453c-a928-2432a702d3e9" (UID: "c41022d8-8b60-453c-a928-2432a702d3e9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.763170 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c502429-c5ef-4960-96eb-c1eb5b0a84d9-client-ca" (OuterVolumeSpecName: "client-ca") pod "4c502429-c5ef-4960-96eb-c1eb5b0a84d9" (UID: "4c502429-c5ef-4960-96eb-c1eb5b0a84d9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.763595 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c502429-c5ef-4960-96eb-c1eb5b0a84d9-config" (OuterVolumeSpecName: "config") pod "4c502429-c5ef-4960-96eb-c1eb5b0a84d9" (UID: "4c502429-c5ef-4960-96eb-c1eb5b0a84d9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.764006 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c41022d8-8b60-453c-a928-2432a702d3e9-config" (OuterVolumeSpecName: "config") pod "c41022d8-8b60-453c-a928-2432a702d3e9" (UID: "c41022d8-8b60-453c-a928-2432a702d3e9"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.765883 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c41022d8-8b60-453c-a928-2432a702d3e9-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "c41022d8-8b60-453c-a928-2432a702d3e9" (UID: "c41022d8-8b60-453c-a928-2432a702d3e9"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.767671 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c41022d8-8b60-453c-a928-2432a702d3e9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "c41022d8-8b60-453c-a928-2432a702d3e9" (UID: "c41022d8-8b60-453c-a928-2432a702d3e9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.768019 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c502429-c5ef-4960-96eb-c1eb5b0a84d9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "4c502429-c5ef-4960-96eb-c1eb5b0a84d9" (UID: "4c502429-c5ef-4960-96eb-c1eb5b0a84d9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.768021 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c502429-c5ef-4960-96eb-c1eb5b0a84d9-kube-api-access-c8p62" (OuterVolumeSpecName: "kube-api-access-c8p62") pod "4c502429-c5ef-4960-96eb-c1eb5b0a84d9" (UID: "4c502429-c5ef-4960-96eb-c1eb5b0a84d9"). InnerVolumeSpecName "kube-api-access-c8p62". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.768385 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c41022d8-8b60-453c-a928-2432a702d3e9-kube-api-access-dt7hr" (OuterVolumeSpecName: "kube-api-access-dt7hr") pod "c41022d8-8b60-453c-a928-2432a702d3e9" (UID: "c41022d8-8b60-453c-a928-2432a702d3e9"). InnerVolumeSpecName "kube-api-access-dt7hr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.817732 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dqz4m"] Dec 13 06:35:03 crc kubenswrapper[5048]: W1213 06:35:03.825245 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod152d7097_80ce_42d3_b0a8_45e04a295b3d.slice/crio-fea3ea6fef562a6b9019c7254c994d741afecba78895f7649574fa91cfa036ac WatchSource:0}: Error finding container fea3ea6fef562a6b9019c7254c994d741afecba78895f7649574fa91cfa036ac: Status 404 returned error can't find the container with id fea3ea6fef562a6b9019c7254c994d741afecba78895f7649574fa91cfa036ac Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.863175 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f84bf265-d91a-4c4f-85b9-9f3e4475b08a-serving-cert\") pod \"route-controller-manager-5f665f7bd4-h44gg\" (UID: \"f84bf265-d91a-4c4f-85b9-9f3e4475b08a\") " pod="openshift-route-controller-manager/route-controller-manager-5f665f7bd4-h44gg" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.863383 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-22hnm\" (UniqueName: \"kubernetes.io/projected/f84bf265-d91a-4c4f-85b9-9f3e4475b08a-kube-api-access-22hnm\") pod \"route-controller-manager-5f665f7bd4-h44gg\" (UID: \"f84bf265-d91a-4c4f-85b9-9f3e4475b08a\") " pod="openshift-route-controller-manager/route-controller-manager-5f665f7bd4-h44gg" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.863427 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f84bf265-d91a-4c4f-85b9-9f3e4475b08a-client-ca\") pod \"route-controller-manager-5f665f7bd4-h44gg\" (UID: \"f84bf265-d91a-4c4f-85b9-9f3e4475b08a\") " pod="openshift-route-controller-manager/route-controller-manager-5f665f7bd4-h44gg" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.863488 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f84bf265-d91a-4c4f-85b9-9f3e4475b08a-config\") pod \"route-controller-manager-5f665f7bd4-h44gg\" (UID: \"f84bf265-d91a-4c4f-85b9-9f3e4475b08a\") " pod="openshift-route-controller-manager/route-controller-manager-5f665f7bd4-h44gg" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.863570 5048 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c41022d8-8b60-453c-a928-2432a702d3e9-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.864081 5048 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c41022d8-8b60-453c-a928-2432a702d3e9-client-ca\") on node \"crc\" DevicePath \"\"" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.864632 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c8p62\" (UniqueName: \"kubernetes.io/projected/4c502429-c5ef-4960-96eb-c1eb5b0a84d9-kube-api-access-c8p62\") on node \"crc\" DevicePath \"\"" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.864680 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c41022d8-8b60-453c-a928-2432a702d3e9-config\") on node \"crc\" DevicePath 
\"\"" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.864699 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dt7hr\" (UniqueName: \"kubernetes.io/projected/c41022d8-8b60-453c-a928-2432a702d3e9-kube-api-access-dt7hr\") on node \"crc\" DevicePath \"\"" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.864713 5048 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c41022d8-8b60-453c-a928-2432a702d3e9-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.864725 5048 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4c502429-c5ef-4960-96eb-c1eb5b0a84d9-client-ca\") on node \"crc\" DevicePath \"\"" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.864740 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c502429-c5ef-4960-96eb-c1eb5b0a84d9-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.864750 5048 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4c502429-c5ef-4960-96eb-c1eb5b0a84d9-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.864778 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f84bf265-d91a-4c4f-85b9-9f3e4475b08a-client-ca\") pod \"route-controller-manager-5f665f7bd4-h44gg\" (UID: \"f84bf265-d91a-4c4f-85b9-9f3e4475b08a\") " pod="openshift-route-controller-manager/route-controller-manager-5f665f7bd4-h44gg" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.865586 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f84bf265-d91a-4c4f-85b9-9f3e4475b08a-config\") pod \"route-controller-manager-5f665f7bd4-h44gg\" (UID: \"f84bf265-d91a-4c4f-85b9-9f3e4475b08a\") " pod="openshift-route-controller-manager/route-controller-manager-5f665f7bd4-h44gg" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.867977 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f84bf265-d91a-4c4f-85b9-9f3e4475b08a-serving-cert\") pod \"route-controller-manager-5f665f7bd4-h44gg\" (UID: \"f84bf265-d91a-4c4f-85b9-9f3e4475b08a\") " pod="openshift-route-controller-manager/route-controller-manager-5f665f7bd4-h44gg" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.881479 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-22hnm\" (UniqueName: \"kubernetes.io/projected/f84bf265-d91a-4c4f-85b9-9f3e4475b08a-kube-api-access-22hnm\") pod \"route-controller-manager-5f665f7bd4-h44gg\" (UID: \"f84bf265-d91a-4c4f-85b9-9f3e4475b08a\") " pod="openshift-route-controller-manager/route-controller-manager-5f665f7bd4-h44gg" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.899169 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.907959 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bgnnj"] Dec 13 06:35:03 crc kubenswrapper[5048]: W1213 06:35:03.914124 5048 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0cdd6ca2_d077_4ea1_8dda_6fadccca087d.slice/crio-c1d67cbe74fbda7ed15c5baba03f4ac1a97a812e1a99b5ffcb31916b80dc605f WatchSource:0}: Error finding container c1d67cbe74fbda7ed15c5baba03f4ac1a97a812e1a99b5ffcb31916b80dc605f: Status 404 returned error can't find the container with id c1d67cbe74fbda7ed15c5baba03f4ac1a97a812e1a99b5ffcb31916b80dc605f Dec 13 06:35:03 crc kubenswrapper[5048]: I1213 06:35:03.931206 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5f665f7bd4-h44gg" Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.189192 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.321530 5048 patch_prober.go:28] interesting pod/image-registry-697d97f7c8-7z7xz container/registry namespace/openshift-image-registry: Readiness probe status=failure output="Get \"https://10.217.0.26:5000/healthz\": dial tcp 10.217.0.26:5000: connect: connection refused" start-of-body= Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.321595 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" podUID="36ee331b-baa0-42ac-9bd3-7c52253814e1" containerName="registry" probeResult="failure" output="Get \"https://10.217.0.26:5000/healthz\": dial tcp 10.217.0.26:5000: connect: connection refused" Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.356151 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5f665f7bd4-h44gg"] Dec 13 06:35:04 crc kubenswrapper[5048]: W1213 06:35:04.368507 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf84bf265_d91a_4c4f_85b9_9f3e4475b08a.slice/crio-95b2269bfccd0e4493637e45185ba8cae59a0546a67d5ac4eea9ccb91461fe37 WatchSource:0}: Error finding container 95b2269bfccd0e4493637e45185ba8cae59a0546a67d5ac4eea9ccb91461fe37: Status 404 returned error can't find the container with id 95b2269bfccd0e4493637e45185ba8cae59a0546a67d5ac4eea9ccb91461fe37 Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.521587 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.549101 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dqz4m" event={"ID":"152d7097-80ce-42d3-b0a8-45e04a295b3d","Type":"ContainerStarted","Data":"fea3ea6fef562a6b9019c7254c994d741afecba78895f7649574fa91cfa036ac"} Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.550068 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5f665f7bd4-h44gg" event={"ID":"f84bf265-d91a-4c4f-85b9-9f3e4475b08a","Type":"ContainerStarted","Data":"95b2269bfccd0e4493637e45185ba8cae59a0546a67d5ac4eea9ccb91461fe37"} Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.551661 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-76f74d665d-wjh6p" event={"ID":"c41022d8-8b60-453c-a928-2432a702d3e9","Type":"ContainerDied","Data":"67d64f214e9aac8819fd2d479ad5215e48f9b7bc17b9f6aa420af38f0cf1fecb"} Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.551675 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-76f74d665d-wjh6p" Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.551719 5048 scope.go:117] "RemoveContainer" containerID="c36e0f1f7decd4beb05cbe9a7a3cb322b0be0c27e1aa338398d8e6117be53b0f" Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.553298 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-xbwgh" Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.553606 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-xbwgh" event={"ID":"4c502429-c5ef-4960-96eb-c1eb5b0a84d9","Type":"ContainerDied","Data":"4400ec571408fd02735fefc70dcfc17e65741e62a06a2644f2f4fdd1f1477c87"} Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.557964 5048 generic.go:334] "Generic (PLEG): container finished" podID="36ee331b-baa0-42ac-9bd3-7c52253814e1" containerID="01798a848041919b152c6308fa93ba8571978c608dfe2fa259c63306f76f0669" exitCode=0 Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.558029 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" event={"ID":"36ee331b-baa0-42ac-9bd3-7c52253814e1","Type":"ContainerDied","Data":"01798a848041919b152c6308fa93ba8571978c608dfe2fa259c63306f76f0669"} Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.558051 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" event={"ID":"36ee331b-baa0-42ac-9bd3-7c52253814e1","Type":"ContainerDied","Data":"a4cfa6a4403dbf348ca3a506785e52511dcd2998f028eeeec1d2470a5d48d3d1"} Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.558061 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-7z7xz" Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.560450 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bgnnj" event={"ID":"0cdd6ca2-d077-4ea1-8dda-6fadccca087d","Type":"ContainerStarted","Data":"c1d67cbe74fbda7ed15c5baba03f4ac1a97a812e1a99b5ffcb31916b80dc605f"} Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.576623 5048 scope.go:117] "RemoveContainer" containerID="463a6830bd27925df4c128df691f0cfbdcc89198859eaccf6bf0d15fc50e7d76" Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.604726 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6b5c687b9c-xbwgh"] Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.611037 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6b5c687b9c-xbwgh"] Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.611098 5048 scope.go:117] "RemoveContainer" containerID="01798a848041919b152c6308fa93ba8571978c608dfe2fa259c63306f76f0669" Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.614107 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-76f74d665d-wjh6p"] Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.618024 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-76f74d665d-wjh6p"] Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.629692 5048 scope.go:117] "RemoveContainer" containerID="01798a848041919b152c6308fa93ba8571978c608dfe2fa259c63306f76f0669" Dec 13 06:35:04 crc kubenswrapper[5048]: E1213 06:35:04.630255 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"01798a848041919b152c6308fa93ba8571978c608dfe2fa259c63306f76f0669\": container with ID starting with 01798a848041919b152c6308fa93ba8571978c608dfe2fa259c63306f76f0669 not found: ID does not exist" containerID="01798a848041919b152c6308fa93ba8571978c608dfe2fa259c63306f76f0669" Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.630313 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01798a848041919b152c6308fa93ba8571978c608dfe2fa259c63306f76f0669"} err="failed to get container status \"01798a848041919b152c6308fa93ba8571978c608dfe2fa259c63306f76f0669\": rpc error: code = NotFound desc = could not find container \"01798a848041919b152c6308fa93ba8571978c608dfe2fa259c63306f76f0669\": container with ID starting with 01798a848041919b152c6308fa93ba8571978c608dfe2fa259c63306f76f0669 not found: ID does not exist" Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.677955 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/36ee331b-baa0-42ac-9bd3-7c52253814e1-registry-tls\") pod \"36ee331b-baa0-42ac-9bd3-7c52253814e1\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.678045 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/36ee331b-baa0-42ac-9bd3-7c52253814e1-installation-pull-secrets\") pod \"36ee331b-baa0-42ac-9bd3-7c52253814e1\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " Dec 13 06:35:04 crc kubenswrapper[5048]: 
I1213 06:35:04.678113 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/36ee331b-baa0-42ac-9bd3-7c52253814e1-bound-sa-token\") pod \"36ee331b-baa0-42ac-9bd3-7c52253814e1\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.678286 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"36ee331b-baa0-42ac-9bd3-7c52253814e1\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.678356 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/36ee331b-baa0-42ac-9bd3-7c52253814e1-trusted-ca\") pod \"36ee331b-baa0-42ac-9bd3-7c52253814e1\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.678417 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kbc8x\" (UniqueName: \"kubernetes.io/projected/36ee331b-baa0-42ac-9bd3-7c52253814e1-kube-api-access-kbc8x\") pod \"36ee331b-baa0-42ac-9bd3-7c52253814e1\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.678487 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/36ee331b-baa0-42ac-9bd3-7c52253814e1-ca-trust-extracted\") pod \"36ee331b-baa0-42ac-9bd3-7c52253814e1\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.678539 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/36ee331b-baa0-42ac-9bd3-7c52253814e1-registry-certificates\") pod \"36ee331b-baa0-42ac-9bd3-7c52253814e1\" (UID: \"36ee331b-baa0-42ac-9bd3-7c52253814e1\") " Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.679679 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36ee331b-baa0-42ac-9bd3-7c52253814e1-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "36ee331b-baa0-42ac-9bd3-7c52253814e1" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.679930 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36ee331b-baa0-42ac-9bd3-7c52253814e1-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "36ee331b-baa0-42ac-9bd3-7c52253814e1" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.684231 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36ee331b-baa0-42ac-9bd3-7c52253814e1-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "36ee331b-baa0-42ac-9bd3-7c52253814e1" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1"). InnerVolumeSpecName "registry-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.687851 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/36ee331b-baa0-42ac-9bd3-7c52253814e1-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "36ee331b-baa0-42ac-9bd3-7c52253814e1" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.688739 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "36ee331b-baa0-42ac-9bd3-7c52253814e1" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.691322 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36ee331b-baa0-42ac-9bd3-7c52253814e1-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "36ee331b-baa0-42ac-9bd3-7c52253814e1" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.701813 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36ee331b-baa0-42ac-9bd3-7c52253814e1-kube-api-access-kbc8x" (OuterVolumeSpecName: "kube-api-access-kbc8x") pod "36ee331b-baa0-42ac-9bd3-7c52253814e1" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1"). InnerVolumeSpecName "kube-api-access-kbc8x". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.701921 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/36ee331b-baa0-42ac-9bd3-7c52253814e1-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "36ee331b-baa0-42ac-9bd3-7c52253814e1" (UID: "36ee331b-baa0-42ac-9bd3-7c52253814e1"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.779696 5048 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/36ee331b-baa0-42ac-9bd3-7c52253814e1-bound-sa-token\") on node \"crc\" DevicePath \"\"" Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.779729 5048 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/36ee331b-baa0-42ac-9bd3-7c52253814e1-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.779739 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kbc8x\" (UniqueName: \"kubernetes.io/projected/36ee331b-baa0-42ac-9bd3-7c52253814e1-kube-api-access-kbc8x\") on node \"crc\" DevicePath \"\"" Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.779750 5048 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/36ee331b-baa0-42ac-9bd3-7c52253814e1-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.779759 5048 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/36ee331b-baa0-42ac-9bd3-7c52253814e1-registry-certificates\") on node \"crc\" DevicePath \"\"" Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.779767 5048 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/36ee331b-baa0-42ac-9bd3-7c52253814e1-registry-tls\") on node \"crc\" DevicePath \"\"" Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.779776 5048 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/36ee331b-baa0-42ac-9bd3-7c52253814e1-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.907324 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-7z7xz"] Dec 13 06:35:04 crc kubenswrapper[5048]: I1213 06:35:04.910988 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-7z7xz"] Dec 13 06:35:05 crc kubenswrapper[5048]: I1213 06:35:05.570089 5048 generic.go:334] "Generic (PLEG): container finished" podID="152d7097-80ce-42d3-b0a8-45e04a295b3d" containerID="db5cfd9ade1c5922fb7e145e3026d168dea8f3ad814e87c8ef44d5d7d8984fdf" exitCode=0 Dec 13 06:35:05 crc kubenswrapper[5048]: I1213 06:35:05.570172 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dqz4m" event={"ID":"152d7097-80ce-42d3-b0a8-45e04a295b3d","Type":"ContainerDied","Data":"db5cfd9ade1c5922fb7e145e3026d168dea8f3ad814e87c8ef44d5d7d8984fdf"} Dec 13 06:35:05 crc kubenswrapper[5048]: I1213 06:35:05.574520 5048 generic.go:334] "Generic (PLEG): container finished" podID="0cdd6ca2-d077-4ea1-8dda-6fadccca087d" containerID="14d6cc9dfc601a19ba8b356c89170fd57dc55fe7493b0381ff2fc97398bf5b92" exitCode=0 Dec 13 06:35:05 crc kubenswrapper[5048]: I1213 06:35:05.574557 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bgnnj" event={"ID":"0cdd6ca2-d077-4ea1-8dda-6fadccca087d","Type":"ContainerDied","Data":"14d6cc9dfc601a19ba8b356c89170fd57dc55fe7493b0381ff2fc97398bf5b92"} Dec 13 06:35:05 crc kubenswrapper[5048]: I1213 06:35:05.929363 
5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-77b768d98c-k6m54"] Dec 13 06:35:05 crc kubenswrapper[5048]: E1213 06:35:05.929839 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36ee331b-baa0-42ac-9bd3-7c52253814e1" containerName="registry" Dec 13 06:35:05 crc kubenswrapper[5048]: I1213 06:35:05.929857 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="36ee331b-baa0-42ac-9bd3-7c52253814e1" containerName="registry" Dec 13 06:35:05 crc kubenswrapper[5048]: I1213 06:35:05.929983 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="36ee331b-baa0-42ac-9bd3-7c52253814e1" containerName="registry" Dec 13 06:35:05 crc kubenswrapper[5048]: I1213 06:35:05.930421 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-77b768d98c-k6m54" Dec 13 06:35:05 crc kubenswrapper[5048]: I1213 06:35:05.932017 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Dec 13 06:35:05 crc kubenswrapper[5048]: I1213 06:35:05.932042 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Dec 13 06:35:05 crc kubenswrapper[5048]: I1213 06:35:05.932211 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Dec 13 06:35:05 crc kubenswrapper[5048]: I1213 06:35:05.932483 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Dec 13 06:35:05 crc kubenswrapper[5048]: I1213 06:35:05.933361 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Dec 13 06:35:05 crc kubenswrapper[5048]: I1213 06:35:05.935619 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Dec 13 06:35:05 crc kubenswrapper[5048]: I1213 06:35:05.942255 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Dec 13 06:35:05 crc kubenswrapper[5048]: I1213 06:35:05.943556 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-77b768d98c-k6m54"] Dec 13 06:35:06 crc kubenswrapper[5048]: I1213 06:35:06.009100 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c08f1f78-79ba-4c67-a36b-ecd4bf8bc976-serving-cert\") pod \"controller-manager-77b768d98c-k6m54\" (UID: \"c08f1f78-79ba-4c67-a36b-ecd4bf8bc976\") " pod="openshift-controller-manager/controller-manager-77b768d98c-k6m54" Dec 13 06:35:06 crc kubenswrapper[5048]: I1213 06:35:06.009138 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c08f1f78-79ba-4c67-a36b-ecd4bf8bc976-client-ca\") pod \"controller-manager-77b768d98c-k6m54\" (UID: \"c08f1f78-79ba-4c67-a36b-ecd4bf8bc976\") " pod="openshift-controller-manager/controller-manager-77b768d98c-k6m54" Dec 13 06:35:06 crc kubenswrapper[5048]: I1213 06:35:06.009173 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nrjxf\" (UniqueName: \"kubernetes.io/projected/c08f1f78-79ba-4c67-a36b-ecd4bf8bc976-kube-api-access-nrjxf\") 
pod \"controller-manager-77b768d98c-k6m54\" (UID: \"c08f1f78-79ba-4c67-a36b-ecd4bf8bc976\") " pod="openshift-controller-manager/controller-manager-77b768d98c-k6m54" Dec 13 06:35:06 crc kubenswrapper[5048]: I1213 06:35:06.009208 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c08f1f78-79ba-4c67-a36b-ecd4bf8bc976-config\") pod \"controller-manager-77b768d98c-k6m54\" (UID: \"c08f1f78-79ba-4c67-a36b-ecd4bf8bc976\") " pod="openshift-controller-manager/controller-manager-77b768d98c-k6m54" Dec 13 06:35:06 crc kubenswrapper[5048]: I1213 06:35:06.009226 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c08f1f78-79ba-4c67-a36b-ecd4bf8bc976-proxy-ca-bundles\") pod \"controller-manager-77b768d98c-k6m54\" (UID: \"c08f1f78-79ba-4c67-a36b-ecd4bf8bc976\") " pod="openshift-controller-manager/controller-manager-77b768d98c-k6m54" Dec 13 06:35:06 crc kubenswrapper[5048]: I1213 06:35:06.110605 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nrjxf\" (UniqueName: \"kubernetes.io/projected/c08f1f78-79ba-4c67-a36b-ecd4bf8bc976-kube-api-access-nrjxf\") pod \"controller-manager-77b768d98c-k6m54\" (UID: \"c08f1f78-79ba-4c67-a36b-ecd4bf8bc976\") " pod="openshift-controller-manager/controller-manager-77b768d98c-k6m54" Dec 13 06:35:06 crc kubenswrapper[5048]: I1213 06:35:06.110692 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c08f1f78-79ba-4c67-a36b-ecd4bf8bc976-config\") pod \"controller-manager-77b768d98c-k6m54\" (UID: \"c08f1f78-79ba-4c67-a36b-ecd4bf8bc976\") " pod="openshift-controller-manager/controller-manager-77b768d98c-k6m54" Dec 13 06:35:06 crc kubenswrapper[5048]: I1213 06:35:06.110724 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c08f1f78-79ba-4c67-a36b-ecd4bf8bc976-proxy-ca-bundles\") pod \"controller-manager-77b768d98c-k6m54\" (UID: \"c08f1f78-79ba-4c67-a36b-ecd4bf8bc976\") " pod="openshift-controller-manager/controller-manager-77b768d98c-k6m54" Dec 13 06:35:06 crc kubenswrapper[5048]: I1213 06:35:06.110784 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c08f1f78-79ba-4c67-a36b-ecd4bf8bc976-serving-cert\") pod \"controller-manager-77b768d98c-k6m54\" (UID: \"c08f1f78-79ba-4c67-a36b-ecd4bf8bc976\") " pod="openshift-controller-manager/controller-manager-77b768d98c-k6m54" Dec 13 06:35:06 crc kubenswrapper[5048]: I1213 06:35:06.110805 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c08f1f78-79ba-4c67-a36b-ecd4bf8bc976-client-ca\") pod \"controller-manager-77b768d98c-k6m54\" (UID: \"c08f1f78-79ba-4c67-a36b-ecd4bf8bc976\") " pod="openshift-controller-manager/controller-manager-77b768d98c-k6m54" Dec 13 06:35:06 crc kubenswrapper[5048]: I1213 06:35:06.112113 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c08f1f78-79ba-4c67-a36b-ecd4bf8bc976-client-ca\") pod \"controller-manager-77b768d98c-k6m54\" (UID: \"c08f1f78-79ba-4c67-a36b-ecd4bf8bc976\") " pod="openshift-controller-manager/controller-manager-77b768d98c-k6m54" Dec 13 06:35:06 crc 
kubenswrapper[5048]: I1213 06:35:06.113019 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c08f1f78-79ba-4c67-a36b-ecd4bf8bc976-config\") pod \"controller-manager-77b768d98c-k6m54\" (UID: \"c08f1f78-79ba-4c67-a36b-ecd4bf8bc976\") " pod="openshift-controller-manager/controller-manager-77b768d98c-k6m54" Dec 13 06:35:06 crc kubenswrapper[5048]: I1213 06:35:06.113194 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c08f1f78-79ba-4c67-a36b-ecd4bf8bc976-proxy-ca-bundles\") pod \"controller-manager-77b768d98c-k6m54\" (UID: \"c08f1f78-79ba-4c67-a36b-ecd4bf8bc976\") " pod="openshift-controller-manager/controller-manager-77b768d98c-k6m54" Dec 13 06:35:06 crc kubenswrapper[5048]: I1213 06:35:06.118457 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c08f1f78-79ba-4c67-a36b-ecd4bf8bc976-serving-cert\") pod \"controller-manager-77b768d98c-k6m54\" (UID: \"c08f1f78-79ba-4c67-a36b-ecd4bf8bc976\") " pod="openshift-controller-manager/controller-manager-77b768d98c-k6m54" Dec 13 06:35:06 crc kubenswrapper[5048]: I1213 06:35:06.129136 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nrjxf\" (UniqueName: \"kubernetes.io/projected/c08f1f78-79ba-4c67-a36b-ecd4bf8bc976-kube-api-access-nrjxf\") pod \"controller-manager-77b768d98c-k6m54\" (UID: \"c08f1f78-79ba-4c67-a36b-ecd4bf8bc976\") " pod="openshift-controller-manager/controller-manager-77b768d98c-k6m54" Dec 13 06:35:06 crc kubenswrapper[5048]: I1213 06:35:06.243732 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-77b768d98c-k6m54" Dec 13 06:35:18 crc kubenswrapper[5048]: I1213 06:35:06.572177 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36ee331b-baa0-42ac-9bd3-7c52253814e1" path="/var/lib/kubelet/pods/36ee331b-baa0-42ac-9bd3-7c52253814e1/volumes" Dec 13 06:35:18 crc kubenswrapper[5048]: I1213 06:35:06.572967 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c502429-c5ef-4960-96eb-c1eb5b0a84d9" path="/var/lib/kubelet/pods/4c502429-c5ef-4960-96eb-c1eb5b0a84d9/volumes" Dec 13 06:35:18 crc kubenswrapper[5048]: I1213 06:35:06.573480 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c41022d8-8b60-453c-a928-2432a702d3e9" path="/var/lib/kubelet/pods/c41022d8-8b60-453c-a928-2432a702d3e9/volumes" Dec 13 06:35:18 crc kubenswrapper[5048]: I1213 06:35:06.641160 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Dec 13 06:35:18 crc kubenswrapper[5048]: I1213 06:35:09.703577 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Dec 13 06:35:18 crc kubenswrapper[5048]: I1213 06:35:11.612481 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5f665f7bd4-h44gg" event={"ID":"f84bf265-d91a-4c4f-85b9-9f3e4475b08a","Type":"ContainerStarted","Data":"3376361f09a0909fafd79b6a52e9fa733690ead4b00050083aa0b22416206944"} Dec 13 06:35:18 crc kubenswrapper[5048]: I1213 06:35:14.626508 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-route-controller-manager/route-controller-manager-5f665f7bd4-h44gg" Dec 13 06:35:18 crc kubenswrapper[5048]: I1213 06:35:14.633207 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-5f665f7bd4-h44gg" Dec 13 06:35:18 crc kubenswrapper[5048]: I1213 06:35:14.642314 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-5f665f7bd4-h44gg" podStartSLOduration=13.642291267000001 podStartE2EDuration="13.642291267s" podCreationTimestamp="2025-12-13 06:35:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:35:14.641427064 +0000 UTC m=+348.508021665" watchObservedRunningTime="2025-12-13 06:35:14.642291267 +0000 UTC m=+348.508885858" Dec 13 06:35:18 crc kubenswrapper[5048]: I1213 06:35:17.212063 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Dec 13 06:35:18 crc kubenswrapper[5048]: I1213 06:35:17.576732 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Dec 13 06:35:18 crc kubenswrapper[5048]: I1213 06:35:18.684156 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-77b768d98c-k6m54"] Dec 13 06:35:21 crc kubenswrapper[5048]: I1213 06:35:21.015324 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-77b768d98c-k6m54"] Dec 13 06:35:21 crc kubenswrapper[5048]: I1213 06:35:21.030329 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5f665f7bd4-h44gg"] Dec 13 06:35:21 crc kubenswrapper[5048]: I1213 06:35:21.030540 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-5f665f7bd4-h44gg" podUID="f84bf265-d91a-4c4f-85b9-9f3e4475b08a" containerName="route-controller-manager" containerID="cri-o://3376361f09a0909fafd79b6a52e9fa733690ead4b00050083aa0b22416206944" gracePeriod=30 Dec 13 06:35:21 crc kubenswrapper[5048]: E1213 06:35:21.630135 5048 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Dec 13 06:35:21 crc kubenswrapper[5048]: E1213 06:35:21.630530 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-nzths,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-g8j5s_openshift-marketplace(07c47b88-4b41-47d9-ae48-feacb3431a47): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 13 06:35:21 crc kubenswrapper[5048]: E1213 06:35:21.632639 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-g8j5s" podUID="07c47b88-4b41-47d9-ae48-feacb3431a47" Dec 13 06:35:21 crc kubenswrapper[5048]: I1213 06:35:21.665465 5048 generic.go:334] "Generic (PLEG): container finished" podID="f84bf265-d91a-4c4f-85b9-9f3e4475b08a" containerID="3376361f09a0909fafd79b6a52e9fa733690ead4b00050083aa0b22416206944" exitCode=0 Dec 13 06:35:21 crc kubenswrapper[5048]: I1213 06:35:21.665541 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5f665f7bd4-h44gg" event={"ID":"f84bf265-d91a-4c4f-85b9-9f3e4475b08a","Type":"ContainerDied","Data":"3376361f09a0909fafd79b6a52e9fa733690ead4b00050083aa0b22416206944"} Dec 13 06:35:21 crc kubenswrapper[5048]: I1213 06:35:21.666340 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-77b768d98c-k6m54" event={"ID":"c08f1f78-79ba-4c67-a36b-ecd4bf8bc976","Type":"ContainerStarted","Data":"d0a32c78b10d12c91cc649c9e5dc0d004d8e02102f145339e4358fdced11bfe6"} Dec 13 06:35:21 crc kubenswrapper[5048]: E1213 06:35:21.667728 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-g8j5s" podUID="07c47b88-4b41-47d9-ae48-feacb3431a47" Dec 13 06:35:22 crc kubenswrapper[5048]: I1213 06:35:22.649628 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5f665f7bd4-h44gg" Dec 13 06:35:22 crc kubenswrapper[5048]: I1213 06:35:22.689157 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5f665f7bd4-h44gg" event={"ID":"f84bf265-d91a-4c4f-85b9-9f3e4475b08a","Type":"ContainerDied","Data":"95b2269bfccd0e4493637e45185ba8cae59a0546a67d5ac4eea9ccb91461fe37"} Dec 13 06:35:22 crc kubenswrapper[5048]: I1213 06:35:22.689226 5048 scope.go:117] "RemoveContainer" containerID="3376361f09a0909fafd79b6a52e9fa733690ead4b00050083aa0b22416206944" Dec 13 06:35:22 crc kubenswrapper[5048]: I1213 06:35:22.689333 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5f665f7bd4-h44gg" Dec 13 06:35:22 crc kubenswrapper[5048]: I1213 06:35:22.691884 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-77b768d98c-k6m54" event={"ID":"c08f1f78-79ba-4c67-a36b-ecd4bf8bc976","Type":"ContainerStarted","Data":"f57c8fb8ce666657d1f1a45343841f0e2d7be99032d6ac0e35a075988afd8fc7"} Dec 13 06:35:22 crc kubenswrapper[5048]: I1213 06:35:22.700834 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6b5c687b9c-pqr2f"] Dec 13 06:35:22 crc kubenswrapper[5048]: E1213 06:35:22.701052 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f84bf265-d91a-4c4f-85b9-9f3e4475b08a" containerName="route-controller-manager" Dec 13 06:35:22 crc kubenswrapper[5048]: I1213 06:35:22.701069 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="f84bf265-d91a-4c4f-85b9-9f3e4475b08a" containerName="route-controller-manager" Dec 13 06:35:22 crc kubenswrapper[5048]: I1213 06:35:22.701194 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="f84bf265-d91a-4c4f-85b9-9f3e4475b08a" containerName="route-controller-manager" Dec 13 06:35:22 crc kubenswrapper[5048]: I1213 06:35:22.701589 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-pqr2f" Dec 13 06:35:22 crc kubenswrapper[5048]: I1213 06:35:22.702421 5048 generic.go:334] "Generic (PLEG): container finished" podID="2a00d4f4-f561-491b-a236-7e46f411f58e" containerID="7576d7bcc0a89a26d2d386ca0bbbf4387bfd74da35a1d9f22496dd77a671a8af" exitCode=0 Dec 13 06:35:22 crc kubenswrapper[5048]: I1213 06:35:22.702481 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ngg4l" event={"ID":"2a00d4f4-f561-491b-a236-7e46f411f58e","Type":"ContainerDied","Data":"7576d7bcc0a89a26d2d386ca0bbbf4387bfd74da35a1d9f22496dd77a671a8af"} Dec 13 06:35:22 crc kubenswrapper[5048]: I1213 06:35:22.722102 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6b5c687b9c-pqr2f"] Dec 13 06:35:22 crc kubenswrapper[5048]: I1213 06:35:22.812271 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f84bf265-d91a-4c4f-85b9-9f3e4475b08a-serving-cert\") pod \"f84bf265-d91a-4c4f-85b9-9f3e4475b08a\" (UID: \"f84bf265-d91a-4c4f-85b9-9f3e4475b08a\") " Dec 13 06:35:22 crc kubenswrapper[5048]: I1213 06:35:22.812492 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f84bf265-d91a-4c4f-85b9-9f3e4475b08a-config\") pod \"f84bf265-d91a-4c4f-85b9-9f3e4475b08a\" (UID: \"f84bf265-d91a-4c4f-85b9-9f3e4475b08a\") " Dec 13 06:35:22 crc kubenswrapper[5048]: I1213 06:35:22.812544 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-22hnm\" (UniqueName: \"kubernetes.io/projected/f84bf265-d91a-4c4f-85b9-9f3e4475b08a-kube-api-access-22hnm\") pod \"f84bf265-d91a-4c4f-85b9-9f3e4475b08a\" (UID: \"f84bf265-d91a-4c4f-85b9-9f3e4475b08a\") " Dec 13 06:35:22 crc kubenswrapper[5048]: I1213 06:35:22.813365 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f84bf265-d91a-4c4f-85b9-9f3e4475b08a-client-ca\") pod \"f84bf265-d91a-4c4f-85b9-9f3e4475b08a\" (UID: \"f84bf265-d91a-4c4f-85b9-9f3e4475b08a\") " Dec 13 06:35:22 crc kubenswrapper[5048]: I1213 06:35:22.813468 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4d17ca1b-d3ed-467d-94ab-07bf67b9c626-client-ca\") pod \"route-controller-manager-6b5c687b9c-pqr2f\" (UID: \"4d17ca1b-d3ed-467d-94ab-07bf67b9c626\") " pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-pqr2f" Dec 13 06:35:22 crc kubenswrapper[5048]: I1213 06:35:22.813501 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d17ca1b-d3ed-467d-94ab-07bf67b9c626-config\") pod \"route-controller-manager-6b5c687b9c-pqr2f\" (UID: \"4d17ca1b-d3ed-467d-94ab-07bf67b9c626\") " pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-pqr2f" Dec 13 06:35:22 crc kubenswrapper[5048]: I1213 06:35:22.813531 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sbs6f\" (UniqueName: \"kubernetes.io/projected/4d17ca1b-d3ed-467d-94ab-07bf67b9c626-kube-api-access-sbs6f\") pod \"route-controller-manager-6b5c687b9c-pqr2f\" (UID: 
\"4d17ca1b-d3ed-467d-94ab-07bf67b9c626\") " pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-pqr2f" Dec 13 06:35:22 crc kubenswrapper[5048]: I1213 06:35:22.813544 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f84bf265-d91a-4c4f-85b9-9f3e4475b08a-config" (OuterVolumeSpecName: "config") pod "f84bf265-d91a-4c4f-85b9-9f3e4475b08a" (UID: "f84bf265-d91a-4c4f-85b9-9f3e4475b08a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:35:22 crc kubenswrapper[5048]: I1213 06:35:22.813619 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4d17ca1b-d3ed-467d-94ab-07bf67b9c626-serving-cert\") pod \"route-controller-manager-6b5c687b9c-pqr2f\" (UID: \"4d17ca1b-d3ed-467d-94ab-07bf67b9c626\") " pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-pqr2f" Dec 13 06:35:22 crc kubenswrapper[5048]: I1213 06:35:22.813658 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f84bf265-d91a-4c4f-85b9-9f3e4475b08a-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:35:22 crc kubenswrapper[5048]: I1213 06:35:22.813907 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f84bf265-d91a-4c4f-85b9-9f3e4475b08a-client-ca" (OuterVolumeSpecName: "client-ca") pod "f84bf265-d91a-4c4f-85b9-9f3e4475b08a" (UID: "f84bf265-d91a-4c4f-85b9-9f3e4475b08a"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:35:22 crc kubenswrapper[5048]: I1213 06:35:22.818688 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f84bf265-d91a-4c4f-85b9-9f3e4475b08a-kube-api-access-22hnm" (OuterVolumeSpecName: "kube-api-access-22hnm") pod "f84bf265-d91a-4c4f-85b9-9f3e4475b08a" (UID: "f84bf265-d91a-4c4f-85b9-9f3e4475b08a"). InnerVolumeSpecName "kube-api-access-22hnm". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:35:22 crc kubenswrapper[5048]: I1213 06:35:22.829907 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f84bf265-d91a-4c4f-85b9-9f3e4475b08a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "f84bf265-d91a-4c4f-85b9-9f3e4475b08a" (UID: "f84bf265-d91a-4c4f-85b9-9f3e4475b08a"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:35:22 crc kubenswrapper[5048]: I1213 06:35:22.914621 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4d17ca1b-d3ed-467d-94ab-07bf67b9c626-client-ca\") pod \"route-controller-manager-6b5c687b9c-pqr2f\" (UID: \"4d17ca1b-d3ed-467d-94ab-07bf67b9c626\") " pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-pqr2f" Dec 13 06:35:22 crc kubenswrapper[5048]: I1213 06:35:22.914671 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d17ca1b-d3ed-467d-94ab-07bf67b9c626-config\") pod \"route-controller-manager-6b5c687b9c-pqr2f\" (UID: \"4d17ca1b-d3ed-467d-94ab-07bf67b9c626\") " pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-pqr2f" Dec 13 06:35:22 crc kubenswrapper[5048]: I1213 06:35:22.914700 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sbs6f\" (UniqueName: \"kubernetes.io/projected/4d17ca1b-d3ed-467d-94ab-07bf67b9c626-kube-api-access-sbs6f\") pod \"route-controller-manager-6b5c687b9c-pqr2f\" (UID: \"4d17ca1b-d3ed-467d-94ab-07bf67b9c626\") " pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-pqr2f" Dec 13 06:35:22 crc kubenswrapper[5048]: I1213 06:35:22.914747 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4d17ca1b-d3ed-467d-94ab-07bf67b9c626-serving-cert\") pod \"route-controller-manager-6b5c687b9c-pqr2f\" (UID: \"4d17ca1b-d3ed-467d-94ab-07bf67b9c626\") " pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-pqr2f" Dec 13 06:35:22 crc kubenswrapper[5048]: I1213 06:35:22.914789 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-22hnm\" (UniqueName: \"kubernetes.io/projected/f84bf265-d91a-4c4f-85b9-9f3e4475b08a-kube-api-access-22hnm\") on node \"crc\" DevicePath \"\"" Dec 13 06:35:22 crc kubenswrapper[5048]: I1213 06:35:22.914799 5048 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f84bf265-d91a-4c4f-85b9-9f3e4475b08a-client-ca\") on node \"crc\" DevicePath \"\"" Dec 13 06:35:22 crc kubenswrapper[5048]: I1213 06:35:22.914807 5048 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f84bf265-d91a-4c4f-85b9-9f3e4475b08a-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 13 06:35:22 crc kubenswrapper[5048]: I1213 06:35:22.915644 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4d17ca1b-d3ed-467d-94ab-07bf67b9c626-client-ca\") pod \"route-controller-manager-6b5c687b9c-pqr2f\" (UID: \"4d17ca1b-d3ed-467d-94ab-07bf67b9c626\") " pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-pqr2f" Dec 13 06:35:22 crc kubenswrapper[5048]: I1213 06:35:22.916261 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d17ca1b-d3ed-467d-94ab-07bf67b9c626-config\") pod \"route-controller-manager-6b5c687b9c-pqr2f\" (UID: \"4d17ca1b-d3ed-467d-94ab-07bf67b9c626\") " pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-pqr2f" Dec 13 06:35:22 crc kubenswrapper[5048]: I1213 06:35:22.918184 5048 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4d17ca1b-d3ed-467d-94ab-07bf67b9c626-serving-cert\") pod \"route-controller-manager-6b5c687b9c-pqr2f\" (UID: \"4d17ca1b-d3ed-467d-94ab-07bf67b9c626\") " pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-pqr2f" Dec 13 06:35:22 crc kubenswrapper[5048]: I1213 06:35:22.944270 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sbs6f\" (UniqueName: \"kubernetes.io/projected/4d17ca1b-d3ed-467d-94ab-07bf67b9c626-kube-api-access-sbs6f\") pod \"route-controller-manager-6b5c687b9c-pqr2f\" (UID: \"4d17ca1b-d3ed-467d-94ab-07bf67b9c626\") " pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-pqr2f" Dec 13 06:35:23 crc kubenswrapper[5048]: I1213 06:35:23.013769 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-pqr2f" Dec 13 06:35:23 crc kubenswrapper[5048]: I1213 06:35:23.020177 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5f665f7bd4-h44gg"] Dec 13 06:35:23 crc kubenswrapper[5048]: I1213 06:35:23.023824 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5f665f7bd4-h44gg"] Dec 13 06:35:23 crc kubenswrapper[5048]: W1213 06:35:23.212151 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4d17ca1b_d3ed_467d_94ab_07bf67b9c626.slice/crio-bdc566d9c4620960713f486aa9d54ac7e796b6d00c06bb26b07149839b321c49 WatchSource:0}: Error finding container bdc566d9c4620960713f486aa9d54ac7e796b6d00c06bb26b07149839b321c49: Status 404 returned error can't find the container with id bdc566d9c4620960713f486aa9d54ac7e796b6d00c06bb26b07149839b321c49 Dec 13 06:35:23 crc kubenswrapper[5048]: I1213 06:35:23.213529 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6b5c687b9c-pqr2f"] Dec 13 06:35:23 crc kubenswrapper[5048]: I1213 06:35:23.711944 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-pqr2f" event={"ID":"4d17ca1b-d3ed-467d-94ab-07bf67b9c626","Type":"ContainerStarted","Data":"bdc566d9c4620960713f486aa9d54ac7e796b6d00c06bb26b07149839b321c49"} Dec 13 06:35:24 crc kubenswrapper[5048]: I1213 06:35:24.573981 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f84bf265-d91a-4c4f-85b9-9f3e4475b08a" path="/var/lib/kubelet/pods/f84bf265-d91a-4c4f-85b9-9f3e4475b08a/volumes" Dec 13 06:35:25 crc kubenswrapper[5048]: I1213 06:35:25.726729 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-pqr2f" event={"ID":"4d17ca1b-d3ed-467d-94ab-07bf67b9c626","Type":"ContainerStarted","Data":"3a618ad7f142fdf19f58dd3d16716482e427738bcefdb20f4512234ec5786f62"} Dec 13 06:35:25 crc kubenswrapper[5048]: I1213 06:35:25.726819 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-77b768d98c-k6m54" podUID="c08f1f78-79ba-4c67-a36b-ecd4bf8bc976" containerName="controller-manager" containerID="cri-o://f57c8fb8ce666657d1f1a45343841f0e2d7be99032d6ac0e35a075988afd8fc7" gracePeriod=30 Dec 13 06:35:25 crc kubenswrapper[5048]: I1213 06:35:25.726994 5048 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-77b768d98c-k6m54" Dec 13 06:35:25 crc kubenswrapper[5048]: I1213 06:35:25.733553 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-77b768d98c-k6m54" Dec 13 06:35:25 crc kubenswrapper[5048]: I1213 06:35:25.748977 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-77b768d98c-k6m54" podStartSLOduration=24.748958701 podStartE2EDuration="24.748958701s" podCreationTimestamp="2025-12-13 06:35:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:35:25.746197603 +0000 UTC m=+359.612792194" watchObservedRunningTime="2025-12-13 06:35:25.748958701 +0000 UTC m=+359.615553282" Dec 13 06:35:25 crc kubenswrapper[5048]: I1213 06:35:25.770314 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-pqr2f" podStartSLOduration=4.770294473 podStartE2EDuration="4.770294473s" podCreationTimestamp="2025-12-13 06:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:35:25.768346718 +0000 UTC m=+359.634941339" watchObservedRunningTime="2025-12-13 06:35:25.770294473 +0000 UTC m=+359.636889064" Dec 13 06:35:26 crc kubenswrapper[5048]: I1213 06:35:26.244780 5048 patch_prober.go:28] interesting pod/controller-manager-77b768d98c-k6m54 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.71:8443/healthz\": dial tcp 10.217.0.71:8443: connect: connection refused" start-of-body= Dec 13 06:35:26 crc kubenswrapper[5048]: I1213 06:35:26.244872 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-77b768d98c-k6m54" podUID="c08f1f78-79ba-4c67-a36b-ecd4bf8bc976" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.71:8443/healthz\": dial tcp 10.217.0.71:8443: connect: connection refused" Dec 13 06:35:26 crc kubenswrapper[5048]: I1213 06:35:26.735890 5048 generic.go:334] "Generic (PLEG): container finished" podID="c08f1f78-79ba-4c67-a36b-ecd4bf8bc976" containerID="f57c8fb8ce666657d1f1a45343841f0e2d7be99032d6ac0e35a075988afd8fc7" exitCode=0 Dec 13 06:35:26 crc kubenswrapper[5048]: I1213 06:35:26.735953 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-77b768d98c-k6m54" event={"ID":"c08f1f78-79ba-4c67-a36b-ecd4bf8bc976","Type":"ContainerDied","Data":"f57c8fb8ce666657d1f1a45343841f0e2d7be99032d6ac0e35a075988afd8fc7"} Dec 13 06:35:26 crc kubenswrapper[5048]: I1213 06:35:26.736655 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-pqr2f" Dec 13 06:35:26 crc kubenswrapper[5048]: I1213 06:35:26.743095 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6b5c687b9c-pqr2f" Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.529940 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-77b768d98c-k6m54" Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.554465 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-76f74d665d-9rpmw"] Dec 13 06:35:27 crc kubenswrapper[5048]: E1213 06:35:27.554651 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c08f1f78-79ba-4c67-a36b-ecd4bf8bc976" containerName="controller-manager" Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.554663 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="c08f1f78-79ba-4c67-a36b-ecd4bf8bc976" containerName="controller-manager" Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.554753 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="c08f1f78-79ba-4c67-a36b-ecd4bf8bc976" containerName="controller-manager" Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.555090 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-76f74d665d-9rpmw" Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.570198 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-76f74d665d-9rpmw"] Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.581265 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c08f1f78-79ba-4c67-a36b-ecd4bf8bc976-config\") pod \"c08f1f78-79ba-4c67-a36b-ecd4bf8bc976\" (UID: \"c08f1f78-79ba-4c67-a36b-ecd4bf8bc976\") " Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.581315 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nrjxf\" (UniqueName: \"kubernetes.io/projected/c08f1f78-79ba-4c67-a36b-ecd4bf8bc976-kube-api-access-nrjxf\") pod \"c08f1f78-79ba-4c67-a36b-ecd4bf8bc976\" (UID: \"c08f1f78-79ba-4c67-a36b-ecd4bf8bc976\") " Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.581339 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c08f1f78-79ba-4c67-a36b-ecd4bf8bc976-serving-cert\") pod \"c08f1f78-79ba-4c67-a36b-ecd4bf8bc976\" (UID: \"c08f1f78-79ba-4c67-a36b-ecd4bf8bc976\") " Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.581373 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c08f1f78-79ba-4c67-a36b-ecd4bf8bc976-proxy-ca-bundles\") pod \"c08f1f78-79ba-4c67-a36b-ecd4bf8bc976\" (UID: \"c08f1f78-79ba-4c67-a36b-ecd4bf8bc976\") " Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.581400 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c08f1f78-79ba-4c67-a36b-ecd4bf8bc976-client-ca\") pod \"c08f1f78-79ba-4c67-a36b-ecd4bf8bc976\" (UID: \"c08f1f78-79ba-4c67-a36b-ecd4bf8bc976\") " Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.581500 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f28fe907-3570-48ed-8a51-bf59848e3145-client-ca\") pod \"controller-manager-76f74d665d-9rpmw\" (UID: \"f28fe907-3570-48ed-8a51-bf59848e3145\") " pod="openshift-controller-manager/controller-manager-76f74d665d-9rpmw" Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.581561 5048 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f28fe907-3570-48ed-8a51-bf59848e3145-proxy-ca-bundles\") pod \"controller-manager-76f74d665d-9rpmw\" (UID: \"f28fe907-3570-48ed-8a51-bf59848e3145\") " pod="openshift-controller-manager/controller-manager-76f74d665d-9rpmw" Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.581588 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f28fe907-3570-48ed-8a51-bf59848e3145-config\") pod \"controller-manager-76f74d665d-9rpmw\" (UID: \"f28fe907-3570-48ed-8a51-bf59848e3145\") " pod="openshift-controller-manager/controller-manager-76f74d665d-9rpmw" Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.581617 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f28fe907-3570-48ed-8a51-bf59848e3145-serving-cert\") pod \"controller-manager-76f74d665d-9rpmw\" (UID: \"f28fe907-3570-48ed-8a51-bf59848e3145\") " pod="openshift-controller-manager/controller-manager-76f74d665d-9rpmw" Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.581653 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fvn6x\" (UniqueName: \"kubernetes.io/projected/f28fe907-3570-48ed-8a51-bf59848e3145-kube-api-access-fvn6x\") pod \"controller-manager-76f74d665d-9rpmw\" (UID: \"f28fe907-3570-48ed-8a51-bf59848e3145\") " pod="openshift-controller-manager/controller-manager-76f74d665d-9rpmw" Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.582200 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c08f1f78-79ba-4c67-a36b-ecd4bf8bc976-config" (OuterVolumeSpecName: "config") pod "c08f1f78-79ba-4c67-a36b-ecd4bf8bc976" (UID: "c08f1f78-79ba-4c67-a36b-ecd4bf8bc976"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.582993 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c08f1f78-79ba-4c67-a36b-ecd4bf8bc976-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "c08f1f78-79ba-4c67-a36b-ecd4bf8bc976" (UID: "c08f1f78-79ba-4c67-a36b-ecd4bf8bc976"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.583005 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c08f1f78-79ba-4c67-a36b-ecd4bf8bc976-client-ca" (OuterVolumeSpecName: "client-ca") pod "c08f1f78-79ba-4c67-a36b-ecd4bf8bc976" (UID: "c08f1f78-79ba-4c67-a36b-ecd4bf8bc976"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.587697 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c08f1f78-79ba-4c67-a36b-ecd4bf8bc976-kube-api-access-nrjxf" (OuterVolumeSpecName: "kube-api-access-nrjxf") pod "c08f1f78-79ba-4c67-a36b-ecd4bf8bc976" (UID: "c08f1f78-79ba-4c67-a36b-ecd4bf8bc976"). InnerVolumeSpecName "kube-api-access-nrjxf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.600572 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c08f1f78-79ba-4c67-a36b-ecd4bf8bc976-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "c08f1f78-79ba-4c67-a36b-ecd4bf8bc976" (UID: "c08f1f78-79ba-4c67-a36b-ecd4bf8bc976"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.682371 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f28fe907-3570-48ed-8a51-bf59848e3145-serving-cert\") pod \"controller-manager-76f74d665d-9rpmw\" (UID: \"f28fe907-3570-48ed-8a51-bf59848e3145\") " pod="openshift-controller-manager/controller-manager-76f74d665d-9rpmw" Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.682491 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fvn6x\" (UniqueName: \"kubernetes.io/projected/f28fe907-3570-48ed-8a51-bf59848e3145-kube-api-access-fvn6x\") pod \"controller-manager-76f74d665d-9rpmw\" (UID: \"f28fe907-3570-48ed-8a51-bf59848e3145\") " pod="openshift-controller-manager/controller-manager-76f74d665d-9rpmw" Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.682548 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f28fe907-3570-48ed-8a51-bf59848e3145-client-ca\") pod \"controller-manager-76f74d665d-9rpmw\" (UID: \"f28fe907-3570-48ed-8a51-bf59848e3145\") " pod="openshift-controller-manager/controller-manager-76f74d665d-9rpmw" Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.682602 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f28fe907-3570-48ed-8a51-bf59848e3145-proxy-ca-bundles\") pod \"controller-manager-76f74d665d-9rpmw\" (UID: \"f28fe907-3570-48ed-8a51-bf59848e3145\") " pod="openshift-controller-manager/controller-manager-76f74d665d-9rpmw" Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.682636 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f28fe907-3570-48ed-8a51-bf59848e3145-config\") pod \"controller-manager-76f74d665d-9rpmw\" (UID: \"f28fe907-3570-48ed-8a51-bf59848e3145\") " pod="openshift-controller-manager/controller-manager-76f74d665d-9rpmw" Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.682681 5048 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c08f1f78-79ba-4c67-a36b-ecd4bf8bc976-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.682695 5048 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c08f1f78-79ba-4c67-a36b-ecd4bf8bc976-client-ca\") on node \"crc\" DevicePath \"\"" Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.682709 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c08f1f78-79ba-4c67-a36b-ecd4bf8bc976-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.682727 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nrjxf\" (UniqueName: 
\"kubernetes.io/projected/c08f1f78-79ba-4c67-a36b-ecd4bf8bc976-kube-api-access-nrjxf\") on node \"crc\" DevicePath \"\"" Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.682745 5048 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c08f1f78-79ba-4c67-a36b-ecd4bf8bc976-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.683703 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f28fe907-3570-48ed-8a51-bf59848e3145-client-ca\") pod \"controller-manager-76f74d665d-9rpmw\" (UID: \"f28fe907-3570-48ed-8a51-bf59848e3145\") " pod="openshift-controller-manager/controller-manager-76f74d665d-9rpmw" Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.684148 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f28fe907-3570-48ed-8a51-bf59848e3145-config\") pod \"controller-manager-76f74d665d-9rpmw\" (UID: \"f28fe907-3570-48ed-8a51-bf59848e3145\") " pod="openshift-controller-manager/controller-manager-76f74d665d-9rpmw" Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.684891 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f28fe907-3570-48ed-8a51-bf59848e3145-proxy-ca-bundles\") pod \"controller-manager-76f74d665d-9rpmw\" (UID: \"f28fe907-3570-48ed-8a51-bf59848e3145\") " pod="openshift-controller-manager/controller-manager-76f74d665d-9rpmw" Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.687127 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f28fe907-3570-48ed-8a51-bf59848e3145-serving-cert\") pod \"controller-manager-76f74d665d-9rpmw\" (UID: \"f28fe907-3570-48ed-8a51-bf59848e3145\") " pod="openshift-controller-manager/controller-manager-76f74d665d-9rpmw" Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.714219 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fvn6x\" (UniqueName: \"kubernetes.io/projected/f28fe907-3570-48ed-8a51-bf59848e3145-kube-api-access-fvn6x\") pod \"controller-manager-76f74d665d-9rpmw\" (UID: \"f28fe907-3570-48ed-8a51-bf59848e3145\") " pod="openshift-controller-manager/controller-manager-76f74d665d-9rpmw" Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.743734 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-77b768d98c-k6m54" Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.743739 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-77b768d98c-k6m54" event={"ID":"c08f1f78-79ba-4c67-a36b-ecd4bf8bc976","Type":"ContainerDied","Data":"d0a32c78b10d12c91cc649c9e5dc0d004d8e02102f145339e4358fdced11bfe6"} Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.743823 5048 scope.go:117] "RemoveContainer" containerID="f57c8fb8ce666657d1f1a45343841f0e2d7be99032d6ac0e35a075988afd8fc7" Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.770290 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-77b768d98c-k6m54"] Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.775108 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-77b768d98c-k6m54"] Dec 13 06:35:27 crc kubenswrapper[5048]: I1213 06:35:27.881576 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-76f74d665d-9rpmw" Dec 13 06:35:28 crc kubenswrapper[5048]: I1213 06:35:28.573724 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c08f1f78-79ba-4c67-a36b-ecd4bf8bc976" path="/var/lib/kubelet/pods/c08f1f78-79ba-4c67-a36b-ecd4bf8bc976/volumes" Dec 13 06:35:29 crc kubenswrapper[5048]: I1213 06:35:29.760304 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bgnnj" event={"ID":"0cdd6ca2-d077-4ea1-8dda-6fadccca087d","Type":"ContainerStarted","Data":"e0cc37129b295d2094af88e521c86b7d0aa2bdefbdf59a4ca2055d330c9451d0"} Dec 13 06:35:29 crc kubenswrapper[5048]: I1213 06:35:29.762158 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ngg4l" event={"ID":"2a00d4f4-f561-491b-a236-7e46f411f58e","Type":"ContainerStarted","Data":"349eb777fcaa45d9c516a57dd5bc9ad4bb28bf71f0f7958be0a4a27392ed9b89"} Dec 13 06:35:29 crc kubenswrapper[5048]: I1213 06:35:29.763572 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dqz4m" event={"ID":"152d7097-80ce-42d3-b0a8-45e04a295b3d","Type":"ContainerStarted","Data":"e8699acee91483d85265c81e4262727e9ca6d98d6cbe9dc9525ed57de88673c1"} Dec 13 06:35:29 crc kubenswrapper[5048]: I1213 06:35:29.780552 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-76f74d665d-9rpmw"] Dec 13 06:35:29 crc kubenswrapper[5048]: W1213 06:35:29.783189 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf28fe907_3570_48ed_8a51_bf59848e3145.slice/crio-b26af57a5b5a592c9a4a90f1ff359adfa4cff563a3a04aec3b6ed9058331da13 WatchSource:0}: Error finding container b26af57a5b5a592c9a4a90f1ff359adfa4cff563a3a04aec3b6ed9058331da13: Status 404 returned error can't find the container with id b26af57a5b5a592c9a4a90f1ff359adfa4cff563a3a04aec3b6ed9058331da13 Dec 13 06:35:30 crc kubenswrapper[5048]: E1213 06:35:30.211649 5048 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod152d7097_80ce_42d3_b0a8_45e04a295b3d.slice/crio-e8699acee91483d85265c81e4262727e9ca6d98d6cbe9dc9525ed57de88673c1.scope\": RecentStats: unable to find data in memory 
cache]" Dec 13 06:35:30 crc kubenswrapper[5048]: I1213 06:35:30.768961 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-76f74d665d-9rpmw" event={"ID":"f28fe907-3570-48ed-8a51-bf59848e3145","Type":"ContainerStarted","Data":"487661becbcbe67ec520bcc369ec57bb9c0e96d27a399b82eaed92559a224dd9"} Dec 13 06:35:30 crc kubenswrapper[5048]: I1213 06:35:30.769002 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-76f74d665d-9rpmw" event={"ID":"f28fe907-3570-48ed-8a51-bf59848e3145","Type":"ContainerStarted","Data":"b26af57a5b5a592c9a4a90f1ff359adfa4cff563a3a04aec3b6ed9058331da13"} Dec 13 06:35:30 crc kubenswrapper[5048]: I1213 06:35:30.769195 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-76f74d665d-9rpmw" Dec 13 06:35:30 crc kubenswrapper[5048]: I1213 06:35:30.771296 5048 generic.go:334] "Generic (PLEG): container finished" podID="152d7097-80ce-42d3-b0a8-45e04a295b3d" containerID="e8699acee91483d85265c81e4262727e9ca6d98d6cbe9dc9525ed57de88673c1" exitCode=0 Dec 13 06:35:30 crc kubenswrapper[5048]: I1213 06:35:30.771343 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dqz4m" event={"ID":"152d7097-80ce-42d3-b0a8-45e04a295b3d","Type":"ContainerDied","Data":"e8699acee91483d85265c81e4262727e9ca6d98d6cbe9dc9525ed57de88673c1"} Dec 13 06:35:30 crc kubenswrapper[5048]: I1213 06:35:30.773287 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bgnnj" event={"ID":"0cdd6ca2-d077-4ea1-8dda-6fadccca087d","Type":"ContainerDied","Data":"e0cc37129b295d2094af88e521c86b7d0aa2bdefbdf59a4ca2055d330c9451d0"} Dec 13 06:35:30 crc kubenswrapper[5048]: I1213 06:35:30.773662 5048 generic.go:334] "Generic (PLEG): container finished" podID="0cdd6ca2-d077-4ea1-8dda-6fadccca087d" containerID="e0cc37129b295d2094af88e521c86b7d0aa2bdefbdf59a4ca2055d330c9451d0" exitCode=0 Dec 13 06:35:30 crc kubenswrapper[5048]: I1213 06:35:30.774024 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-76f74d665d-9rpmw" Dec 13 06:35:30 crc kubenswrapper[5048]: I1213 06:35:30.787872 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-76f74d665d-9rpmw" podStartSLOduration=9.78784926 podStartE2EDuration="9.78784926s" podCreationTimestamp="2025-12-13 06:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:35:30.786138052 +0000 UTC m=+364.652732633" watchObservedRunningTime="2025-12-13 06:35:30.78784926 +0000 UTC m=+364.654443841" Dec 13 06:35:30 crc kubenswrapper[5048]: I1213 06:35:30.851765 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-ngg4l" podStartSLOduration=5.031456515 podStartE2EDuration="30.851743747s" podCreationTimestamp="2025-12-13 06:35:00 +0000 UTC" firstStartedPulling="2025-12-13 06:35:03.541077308 +0000 UTC m=+337.407671889" lastFinishedPulling="2025-12-13 06:35:29.36136454 +0000 UTC m=+363.227959121" observedRunningTime="2025-12-13 06:35:30.829709034 +0000 UTC m=+364.696303625" watchObservedRunningTime="2025-12-13 06:35:30.851743747 +0000 UTC m=+364.718338328" Dec 13 06:35:31 crc kubenswrapper[5048]: I1213 06:35:31.211446 5048 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-ngg4l" Dec 13 06:35:31 crc kubenswrapper[5048]: I1213 06:35:31.211526 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-ngg4l" Dec 13 06:35:32 crc kubenswrapper[5048]: I1213 06:35:32.252037 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-ngg4l" podUID="2a00d4f4-f561-491b-a236-7e46f411f58e" containerName="registry-server" probeResult="failure" output=< Dec 13 06:35:32 crc kubenswrapper[5048]: timeout: failed to connect service ":50051" within 1s Dec 13 06:35:32 crc kubenswrapper[5048]: > Dec 13 06:35:41 crc kubenswrapper[5048]: I1213 06:35:41.270638 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-ngg4l" Dec 13 06:35:41 crc kubenswrapper[5048]: I1213 06:35:41.318249 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-ngg4l" Dec 13 06:35:46 crc kubenswrapper[5048]: I1213 06:35:46.216544 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 13 06:35:46 crc kubenswrapper[5048]: I1213 06:35:46.217170 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 13 06:35:47 crc kubenswrapper[5048]: I1213 06:35:47.883919 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dqz4m" event={"ID":"152d7097-80ce-42d3-b0a8-45e04a295b3d","Type":"ContainerStarted","Data":"482176589cf5545d9fb4abc81b90711dd6156f38446f1d85319f825e5f57923a"} Dec 13 06:35:47 crc kubenswrapper[5048]: I1213 06:35:47.887798 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bgnnj" event={"ID":"0cdd6ca2-d077-4ea1-8dda-6fadccca087d","Type":"ContainerStarted","Data":"2bd0bb61259e8d0548d512614ff53d551d148d3791f6b19297a88c80d6f68343"} Dec 13 06:35:47 crc kubenswrapper[5048]: I1213 06:35:47.902771 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-dqz4m" podStartSLOduration=10.197347045 podStartE2EDuration="44.902751894s" podCreationTimestamp="2025-12-13 06:35:03 +0000 UTC" firstStartedPulling="2025-12-13 06:35:12.620302204 +0000 UTC m=+346.486896785" lastFinishedPulling="2025-12-13 06:35:47.325707053 +0000 UTC m=+381.192301634" observedRunningTime="2025-12-13 06:35:47.901457527 +0000 UTC m=+381.768052108" watchObservedRunningTime="2025-12-13 06:35:47.902751894 +0000 UTC m=+381.769346475" Dec 13 06:35:47 crc kubenswrapper[5048]: I1213 06:35:47.921665 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-bgnnj" podStartSLOduration=11.15728278 podStartE2EDuration="44.921646359s" podCreationTimestamp="2025-12-13 06:35:03 +0000 UTC" firstStartedPulling="2025-12-13 06:35:13.623640205 +0000 UTC m=+347.490234786" lastFinishedPulling="2025-12-13 
06:35:47.388003784 +0000 UTC m=+381.254598365" observedRunningTime="2025-12-13 06:35:47.919887568 +0000 UTC m=+381.786482149" watchObservedRunningTime="2025-12-13 06:35:47.921646359 +0000 UTC m=+381.788240940" Dec 13 06:35:51 crc kubenswrapper[5048]: I1213 06:35:51.909017 5048 generic.go:334] "Generic (PLEG): container finished" podID="07c47b88-4b41-47d9-ae48-feacb3431a47" containerID="fa8365dc1a665538e2a64bb73a60d9b8b745ce546d3e0fb6ca91085c14c1ee08" exitCode=0 Dec 13 06:35:51 crc kubenswrapper[5048]: I1213 06:35:51.909094 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g8j5s" event={"ID":"07c47b88-4b41-47d9-ae48-feacb3431a47","Type":"ContainerDied","Data":"fa8365dc1a665538e2a64bb73a60d9b8b745ce546d3e0fb6ca91085c14c1ee08"} Dec 13 06:35:53 crc kubenswrapper[5048]: I1213 06:35:53.461778 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-bgnnj" Dec 13 06:35:53 crc kubenswrapper[5048]: I1213 06:35:53.462109 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-bgnnj" Dec 13 06:35:53 crc kubenswrapper[5048]: I1213 06:35:53.496337 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-bgnnj" Dec 13 06:35:53 crc kubenswrapper[5048]: I1213 06:35:53.618398 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-dqz4m" Dec 13 06:35:53 crc kubenswrapper[5048]: I1213 06:35:53.618479 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-dqz4m" Dec 13 06:35:53 crc kubenswrapper[5048]: I1213 06:35:53.657051 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-dqz4m" Dec 13 06:35:53 crc kubenswrapper[5048]: I1213 06:35:53.924926 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g8j5s" event={"ID":"07c47b88-4b41-47d9-ae48-feacb3431a47","Type":"ContainerStarted","Data":"cede534783e6e2c350a236271ad8ceb0522182830268c1d7d425ce13ea3ad92b"} Dec 13 06:35:53 crc kubenswrapper[5048]: I1213 06:35:53.947022 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-g8j5s" podStartSLOduration=4.303100358 podStartE2EDuration="53.947001392s" podCreationTimestamp="2025-12-13 06:35:00 +0000 UTC" firstStartedPulling="2025-12-13 06:35:03.548838677 +0000 UTC m=+337.415433258" lastFinishedPulling="2025-12-13 06:35:53.192739711 +0000 UTC m=+387.059334292" observedRunningTime="2025-12-13 06:35:53.945690025 +0000 UTC m=+387.812284626" watchObservedRunningTime="2025-12-13 06:35:53.947001392 +0000 UTC m=+387.813595983" Dec 13 06:35:53 crc kubenswrapper[5048]: I1213 06:35:53.967415 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-bgnnj" Dec 13 06:35:53 crc kubenswrapper[5048]: I1213 06:35:53.974208 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-dqz4m" Dec 13 06:36:01 crc kubenswrapper[5048]: I1213 06:36:01.014305 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-g8j5s" Dec 13 06:36:01 crc kubenswrapper[5048]: I1213 06:36:01.014949 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/certified-operators-g8j5s" Dec 13 06:36:01 crc kubenswrapper[5048]: I1213 06:36:01.053100 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-g8j5s" Dec 13 06:36:02 crc kubenswrapper[5048]: I1213 06:36:02.009507 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-g8j5s" Dec 13 06:36:16 crc kubenswrapper[5048]: I1213 06:36:16.216311 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 13 06:36:16 crc kubenswrapper[5048]: I1213 06:36:16.216983 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 13 06:36:46 crc kubenswrapper[5048]: I1213 06:36:46.216475 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 13 06:36:46 crc kubenswrapper[5048]: I1213 06:36:46.217103 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 13 06:36:46 crc kubenswrapper[5048]: I1213 06:36:46.217152 5048 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" Dec 13 06:36:46 crc kubenswrapper[5048]: I1213 06:36:46.217772 5048 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b5193378b65ca2050a83545e0498527657cf2baaa1e10184e3d174b0ace1867e"} pod="openshift-machine-config-operator/machine-config-daemon-j7hns" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 13 06:36:46 crc kubenswrapper[5048]: I1213 06:36:46.217829 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" containerID="cri-o://b5193378b65ca2050a83545e0498527657cf2baaa1e10184e3d174b0ace1867e" gracePeriod=600 Dec 13 06:36:47 crc kubenswrapper[5048]: I1213 06:36:47.221138 5048 generic.go:334] "Generic (PLEG): container finished" podID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerID="b5193378b65ca2050a83545e0498527657cf2baaa1e10184e3d174b0ace1867e" exitCode=0 Dec 13 06:36:47 crc kubenswrapper[5048]: I1213 06:36:47.221233 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" 
event={"ID":"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b","Type":"ContainerDied","Data":"b5193378b65ca2050a83545e0498527657cf2baaa1e10184e3d174b0ace1867e"} Dec 13 06:36:47 crc kubenswrapper[5048]: I1213 06:36:47.221623 5048 scope.go:117] "RemoveContainer" containerID="a6255a8a01526d521df40075d500f17e184874682f3ac3f62744a51ecebc0a95" Dec 13 06:36:49 crc kubenswrapper[5048]: I1213 06:36:49.234804 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" event={"ID":"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b","Type":"ContainerStarted","Data":"43141bab8f04179c1cc49c5c276db3116f7a7de3d0b4075641053d18b79b3930"} Dec 13 06:38:29 crc kubenswrapper[5048]: I1213 06:38:29.541881 5048 scope.go:117] "RemoveContainer" containerID="13dbd6ab48b35ce49c0a111ba45edf5d061a07c76da1475d3f49afa279f38915" Dec 13 06:39:16 crc kubenswrapper[5048]: I1213 06:39:16.216552 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 13 06:39:16 crc kubenswrapper[5048]: I1213 06:39:16.217611 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 13 06:39:29 crc kubenswrapper[5048]: I1213 06:39:29.574984 5048 scope.go:117] "RemoveContainer" containerID="5310fa235bb5de9affb9933ae0cef9998b36a58dd6f9ecf3ccdf00d66dde1087" Dec 13 06:39:46 crc kubenswrapper[5048]: I1213 06:39:46.215797 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 13 06:39:46 crc kubenswrapper[5048]: I1213 06:39:46.217063 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 13 06:40:16 crc kubenswrapper[5048]: I1213 06:40:16.216260 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 13 06:40:16 crc kubenswrapper[5048]: I1213 06:40:16.216803 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 13 06:40:16 crc kubenswrapper[5048]: I1213 06:40:16.216862 5048 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" Dec 13 06:40:16 crc kubenswrapper[5048]: I1213 06:40:16.217424 5048 
kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"43141bab8f04179c1cc49c5c276db3116f7a7de3d0b4075641053d18b79b3930"} pod="openshift-machine-config-operator/machine-config-daemon-j7hns" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 13 06:40:16 crc kubenswrapper[5048]: I1213 06:40:16.217512 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" containerID="cri-o://43141bab8f04179c1cc49c5c276db3116f7a7de3d0b4075641053d18b79b3930" gracePeriod=600 Dec 13 06:40:17 crc kubenswrapper[5048]: I1213 06:40:17.332137 5048 generic.go:334] "Generic (PLEG): container finished" podID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerID="43141bab8f04179c1cc49c5c276db3116f7a7de3d0b4075641053d18b79b3930" exitCode=0 Dec 13 06:40:17 crc kubenswrapper[5048]: I1213 06:40:17.332205 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" event={"ID":"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b","Type":"ContainerDied","Data":"43141bab8f04179c1cc49c5c276db3116f7a7de3d0b4075641053d18b79b3930"} Dec 13 06:40:17 crc kubenswrapper[5048]: I1213 06:40:17.332378 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" event={"ID":"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b","Type":"ContainerStarted","Data":"04bb8ca387b7d0e469a66283e3b83e0b6d5378cbf2f1611cc5c1df9ba125a043"} Dec 13 06:40:17 crc kubenswrapper[5048]: I1213 06:40:17.332397 5048 scope.go:117] "RemoveContainer" containerID="b5193378b65ca2050a83545e0498527657cf2baaa1e10184e3d174b0ace1867e" Dec 13 06:40:29 crc kubenswrapper[5048]: I1213 06:40:29.609182 5048 scope.go:117] "RemoveContainer" containerID="7ef4c810674b98a5a6f1511a55a423ec25e57323b26b6420cc673e440efd49dd" Dec 13 06:41:29 crc kubenswrapper[5048]: I1213 06:41:29.364526 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-v5shf"] Dec 13 06:41:29 crc kubenswrapper[5048]: I1213 06:41:29.366341 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-v5shf" Dec 13 06:41:29 crc kubenswrapper[5048]: I1213 06:41:29.372049 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Dec 13 06:41:29 crc kubenswrapper[5048]: I1213 06:41:29.374791 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c8kcd\" (UniqueName: \"kubernetes.io/projected/6a17b3c4-050d-40b0-82d8-d9208365e261-kube-api-access-c8kcd\") pod \"cert-manager-cainjector-7f985d654d-v5shf\" (UID: \"6a17b3c4-050d-40b0-82d8-d9208365e261\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-v5shf" Dec 13 06:41:29 crc kubenswrapper[5048]: I1213 06:41:29.375816 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Dec 13 06:41:29 crc kubenswrapper[5048]: I1213 06:41:29.378662 5048 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-d9d8r" Dec 13 06:41:29 crc kubenswrapper[5048]: I1213 06:41:29.389306 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-v5shf"] Dec 13 06:41:29 crc kubenswrapper[5048]: I1213 06:41:29.399877 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-tb8lp"] Dec 13 06:41:29 crc kubenswrapper[5048]: I1213 06:41:29.400838 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-tb8lp" Dec 13 06:41:29 crc kubenswrapper[5048]: I1213 06:41:29.403781 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-m5zdt"] Dec 13 06:41:29 crc kubenswrapper[5048]: I1213 06:41:29.403994 5048 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-glrmx" Dec 13 06:41:29 crc kubenswrapper[5048]: I1213 06:41:29.404622 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-m5zdt" Dec 13 06:41:29 crc kubenswrapper[5048]: I1213 06:41:29.411602 5048 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-tb8fw" Dec 13 06:41:29 crc kubenswrapper[5048]: I1213 06:41:29.411879 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-tb8lp"] Dec 13 06:41:29 crc kubenswrapper[5048]: I1213 06:41:29.437191 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-m5zdt"] Dec 13 06:41:29 crc kubenswrapper[5048]: I1213 06:41:29.475609 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xb8qg\" (UniqueName: \"kubernetes.io/projected/fe89c6cd-2d34-47e7-9ff7-cba95295e680-kube-api-access-xb8qg\") pod \"cert-manager-5b446d88c5-tb8lp\" (UID: \"fe89c6cd-2d34-47e7-9ff7-cba95295e680\") " pod="cert-manager/cert-manager-5b446d88c5-tb8lp" Dec 13 06:41:29 crc kubenswrapper[5048]: I1213 06:41:29.475704 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9n9md\" (UniqueName: \"kubernetes.io/projected/2a9b7a7a-7741-4f30-8e7f-a9475784f796-kube-api-access-9n9md\") pod \"cert-manager-webhook-5655c58dd6-m5zdt\" (UID: \"2a9b7a7a-7741-4f30-8e7f-a9475784f796\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-m5zdt" Dec 13 06:41:29 crc kubenswrapper[5048]: I1213 06:41:29.475736 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c8kcd\" (UniqueName: \"kubernetes.io/projected/6a17b3c4-050d-40b0-82d8-d9208365e261-kube-api-access-c8kcd\") pod \"cert-manager-cainjector-7f985d654d-v5shf\" (UID: \"6a17b3c4-050d-40b0-82d8-d9208365e261\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-v5shf" Dec 13 06:41:29 crc kubenswrapper[5048]: I1213 06:41:29.494898 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c8kcd\" (UniqueName: \"kubernetes.io/projected/6a17b3c4-050d-40b0-82d8-d9208365e261-kube-api-access-c8kcd\") pod \"cert-manager-cainjector-7f985d654d-v5shf\" (UID: \"6a17b3c4-050d-40b0-82d8-d9208365e261\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-v5shf" Dec 13 06:41:29 crc kubenswrapper[5048]: I1213 06:41:29.577323 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xb8qg\" (UniqueName: \"kubernetes.io/projected/fe89c6cd-2d34-47e7-9ff7-cba95295e680-kube-api-access-xb8qg\") pod \"cert-manager-5b446d88c5-tb8lp\" (UID: \"fe89c6cd-2d34-47e7-9ff7-cba95295e680\") " pod="cert-manager/cert-manager-5b446d88c5-tb8lp" Dec 13 06:41:29 crc kubenswrapper[5048]: I1213 06:41:29.577816 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9n9md\" (UniqueName: \"kubernetes.io/projected/2a9b7a7a-7741-4f30-8e7f-a9475784f796-kube-api-access-9n9md\") pod \"cert-manager-webhook-5655c58dd6-m5zdt\" (UID: \"2a9b7a7a-7741-4f30-8e7f-a9475784f796\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-m5zdt" Dec 13 06:41:29 crc kubenswrapper[5048]: I1213 06:41:29.596310 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9n9md\" (UniqueName: \"kubernetes.io/projected/2a9b7a7a-7741-4f30-8e7f-a9475784f796-kube-api-access-9n9md\") pod \"cert-manager-webhook-5655c58dd6-m5zdt\" (UID: \"2a9b7a7a-7741-4f30-8e7f-a9475784f796\") " 
pod="cert-manager/cert-manager-webhook-5655c58dd6-m5zdt" Dec 13 06:41:29 crc kubenswrapper[5048]: I1213 06:41:29.604157 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xb8qg\" (UniqueName: \"kubernetes.io/projected/fe89c6cd-2d34-47e7-9ff7-cba95295e680-kube-api-access-xb8qg\") pod \"cert-manager-5b446d88c5-tb8lp\" (UID: \"fe89c6cd-2d34-47e7-9ff7-cba95295e680\") " pod="cert-manager/cert-manager-5b446d88c5-tb8lp" Dec 13 06:41:29 crc kubenswrapper[5048]: I1213 06:41:29.682512 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-v5shf" Dec 13 06:41:29 crc kubenswrapper[5048]: I1213 06:41:29.715474 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-tb8lp" Dec 13 06:41:29 crc kubenswrapper[5048]: I1213 06:41:29.736171 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-m5zdt" Dec 13 06:41:29 crc kubenswrapper[5048]: I1213 06:41:29.917380 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-v5shf"] Dec 13 06:41:29 crc kubenswrapper[5048]: I1213 06:41:29.941490 5048 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 13 06:41:30 crc kubenswrapper[5048]: I1213 06:41:30.184886 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-tb8lp"] Dec 13 06:41:30 crc kubenswrapper[5048]: W1213 06:41:30.188361 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfe89c6cd_2d34_47e7_9ff7_cba95295e680.slice/crio-fcf2a9c41750ccf1dcb9e7767faf3915ed4c7694410b49aedfe16df2864a19e1 WatchSource:0}: Error finding container fcf2a9c41750ccf1dcb9e7767faf3915ed4c7694410b49aedfe16df2864a19e1: Status 404 returned error can't find the container with id fcf2a9c41750ccf1dcb9e7767faf3915ed4c7694410b49aedfe16df2864a19e1 Dec 13 06:41:30 crc kubenswrapper[5048]: W1213 06:41:30.190000 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2a9b7a7a_7741_4f30_8e7f_a9475784f796.slice/crio-077b5a8924ccd40d33c5171fe29d803a21ba15edce4b2455783d26dede6da7f2 WatchSource:0}: Error finding container 077b5a8924ccd40d33c5171fe29d803a21ba15edce4b2455783d26dede6da7f2: Status 404 returned error can't find the container with id 077b5a8924ccd40d33c5171fe29d803a21ba15edce4b2455783d26dede6da7f2 Dec 13 06:41:30 crc kubenswrapper[5048]: I1213 06:41:30.192179 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-m5zdt"] Dec 13 06:41:30 crc kubenswrapper[5048]: I1213 06:41:30.731662 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-m5zdt" event={"ID":"2a9b7a7a-7741-4f30-8e7f-a9475784f796","Type":"ContainerStarted","Data":"077b5a8924ccd40d33c5171fe29d803a21ba15edce4b2455783d26dede6da7f2"} Dec 13 06:41:30 crc kubenswrapper[5048]: I1213 06:41:30.733870 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-tb8lp" event={"ID":"fe89c6cd-2d34-47e7-9ff7-cba95295e680","Type":"ContainerStarted","Data":"fcf2a9c41750ccf1dcb9e7767faf3915ed4c7694410b49aedfe16df2864a19e1"} Dec 13 06:41:30 crc kubenswrapper[5048]: I1213 06:41:30.735157 5048 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-v5shf" event={"ID":"6a17b3c4-050d-40b0-82d8-d9208365e261","Type":"ContainerStarted","Data":"949a6484e2c27041256e7ff930849a4655f8fda9d377825c09b5a134ef35479d"} Dec 13 06:41:38 crc kubenswrapper[5048]: I1213 06:41:38.780284 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-m5zdt" event={"ID":"2a9b7a7a-7741-4f30-8e7f-a9475784f796","Type":"ContainerStarted","Data":"ad6bccf08d8aef31436b49d6ff96ffe494e4bd769b5f0e3e47352de2d122b461"} Dec 13 06:41:38 crc kubenswrapper[5048]: I1213 06:41:38.780886 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-m5zdt" Dec 13 06:41:38 crc kubenswrapper[5048]: I1213 06:41:38.781870 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-tb8lp" event={"ID":"fe89c6cd-2d34-47e7-9ff7-cba95295e680","Type":"ContainerStarted","Data":"f81075e6b097119c2f40a63be87d2124c79ad63364b9b17f1fd4bcfc7d248fc1"} Dec 13 06:41:38 crc kubenswrapper[5048]: I1213 06:41:38.783246 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-v5shf" event={"ID":"6a17b3c4-050d-40b0-82d8-d9208365e261","Type":"ContainerStarted","Data":"500e6ce47fcfa366eb45bb523b35fa0961b2f4ea3a571119ce77d06ba60d06f9"} Dec 13 06:41:38 crc kubenswrapper[5048]: I1213 06:41:38.796554 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-m5zdt" podStartSLOduration=2.210970303 podStartE2EDuration="9.796537672s" podCreationTimestamp="2025-12-13 06:41:29 +0000 UTC" firstStartedPulling="2025-12-13 06:41:30.192568882 +0000 UTC m=+724.059163463" lastFinishedPulling="2025-12-13 06:41:37.778136251 +0000 UTC m=+731.644730832" observedRunningTime="2025-12-13 06:41:38.792947837 +0000 UTC m=+732.659542438" watchObservedRunningTime="2025-12-13 06:41:38.796537672 +0000 UTC m=+732.663132253" Dec 13 06:41:38 crc kubenswrapper[5048]: I1213 06:41:38.809916 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-tb8lp" podStartSLOduration=2.208376735 podStartE2EDuration="9.809896354s" podCreationTimestamp="2025-12-13 06:41:29 +0000 UTC" firstStartedPulling="2025-12-13 06:41:30.190953009 +0000 UTC m=+724.057547590" lastFinishedPulling="2025-12-13 06:41:37.792472628 +0000 UTC m=+731.659067209" observedRunningTime="2025-12-13 06:41:38.806665518 +0000 UTC m=+732.673260109" watchObservedRunningTime="2025-12-13 06:41:38.809896354 +0000 UTC m=+732.676490935" Dec 13 06:41:38 crc kubenswrapper[5048]: I1213 06:41:38.825044 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-v5shf" podStartSLOduration=1.9974559250000001 podStartE2EDuration="9.825025221s" podCreationTimestamp="2025-12-13 06:41:29 +0000 UTC" firstStartedPulling="2025-12-13 06:41:29.941161927 +0000 UTC m=+723.807756508" lastFinishedPulling="2025-12-13 06:41:37.768731223 +0000 UTC m=+731.635325804" observedRunningTime="2025-12-13 06:41:38.823250085 +0000 UTC m=+732.689844686" watchObservedRunningTime="2025-12-13 06:41:38.825025221 +0000 UTC m=+732.691619802" Dec 13 06:41:39 crc kubenswrapper[5048]: I1213 06:41:39.678211 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-hfgcf"] Dec 13 06:41:39 crc kubenswrapper[5048]: I1213 06:41:39.679172 5048 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541" gracePeriod=30 Dec 13 06:41:39 crc kubenswrapper[5048]: I1213 06:41:39.679316 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="ovn-acl-logging" containerID="cri-o://82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f" gracePeriod=30 Dec 13 06:41:39 crc kubenswrapper[5048]: I1213 06:41:39.679280 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="nbdb" containerID="cri-o://4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f" gracePeriod=30 Dec 13 06:41:39 crc kubenswrapper[5048]: I1213 06:41:39.679375 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="northd" containerID="cri-o://28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767" gracePeriod=30 Dec 13 06:41:39 crc kubenswrapper[5048]: I1213 06:41:39.679516 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="sbdb" containerID="cri-o://584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b" gracePeriod=30 Dec 13 06:41:39 crc kubenswrapper[5048]: I1213 06:41:39.679673 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="kube-rbac-proxy-node" containerID="cri-o://c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8" gracePeriod=30 Dec 13 06:41:39 crc kubenswrapper[5048]: I1213 06:41:39.679057 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="ovn-controller" containerID="cri-o://61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87" gracePeriod=30 Dec 13 06:41:39 crc kubenswrapper[5048]: I1213 06:41:39.723158 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="ovnkube-controller" containerID="cri-o://0a177f1f0d21c69f5e5d3f6754642d257a3183cd0c5c26becbe2f0a4d2decc7a" gracePeriod=30 Dec 13 06:41:39 crc kubenswrapper[5048]: I1213 06:41:39.792678 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-r42c6_627477f3-8fca-4b40-ace9-68d22f6b8576/kube-multus/2.log" Dec 13 06:41:39 crc kubenswrapper[5048]: I1213 06:41:39.793189 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-r42c6_627477f3-8fca-4b40-ace9-68d22f6b8576/kube-multus/1.log" Dec 13 06:41:39 crc kubenswrapper[5048]: I1213 06:41:39.793227 5048 generic.go:334] "Generic (PLEG): container finished" podID="627477f3-8fca-4b40-ace9-68d22f6b8576" containerID="595fa411012d7094d4f57667cd82ebf05e244f51840ad40e113a7580ee0a8b79" exitCode=2 Dec 13 
06:41:39 crc kubenswrapper[5048]: I1213 06:41:39.793976 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-r42c6" event={"ID":"627477f3-8fca-4b40-ace9-68d22f6b8576","Type":"ContainerDied","Data":"595fa411012d7094d4f57667cd82ebf05e244f51840ad40e113a7580ee0a8b79"} Dec 13 06:41:39 crc kubenswrapper[5048]: I1213 06:41:39.794027 5048 scope.go:117] "RemoveContainer" containerID="29eacc23b1b0315e6101fa8981ce61752594e1488b26a1dfba5310a6893d0a9e" Dec 13 06:41:39 crc kubenswrapper[5048]: I1213 06:41:39.795099 5048 scope.go:117] "RemoveContainer" containerID="595fa411012d7094d4f57667cd82ebf05e244f51840ad40e113a7580ee0a8b79" Dec 13 06:41:39 crc kubenswrapper[5048]: I1213 06:41:39.996224 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hfgcf_caf986e7-b521-40fd-ae26-18716730d57d/ovnkube-controller/3.log" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.000043 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hfgcf_caf986e7-b521-40fd-ae26-18716730d57d/ovn-acl-logging/0.log" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.000679 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hfgcf_caf986e7-b521-40fd-ae26-18716730d57d/ovn-controller/0.log" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.001458 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.012931 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-kubelet\") pod \"caf986e7-b521-40fd-ae26-18716730d57d\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.012965 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-run-systemd\") pod \"caf986e7-b521-40fd-ae26-18716730d57d\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.012989 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-etc-openvswitch\") pod \"caf986e7-b521-40fd-ae26-18716730d57d\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.013019 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-run-netns\") pod \"caf986e7-b521-40fd-ae26-18716730d57d\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.013036 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-cni-bin\") pod \"caf986e7-b521-40fd-ae26-18716730d57d\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.013058 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-var-lib-openvswitch\") pod \"caf986e7-b521-40fd-ae26-18716730d57d\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.013102 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "caf986e7-b521-40fd-ae26-18716730d57d" (UID: "caf986e7-b521-40fd-ae26-18716730d57d"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.013155 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "caf986e7-b521-40fd-ae26-18716730d57d" (UID: "caf986e7-b521-40fd-ae26-18716730d57d"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.013112 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "caf986e7-b521-40fd-ae26-18716730d57d" (UID: "caf986e7-b521-40fd-ae26-18716730d57d"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.013190 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "caf986e7-b521-40fd-ae26-18716730d57d" (UID: "caf986e7-b521-40fd-ae26-18716730d57d"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.013204 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "caf986e7-b521-40fd-ae26-18716730d57d" (UID: "caf986e7-b521-40fd-ae26-18716730d57d"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.013234 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "caf986e7-b521-40fd-ae26-18716730d57d" (UID: "caf986e7-b521-40fd-ae26-18716730d57d"). InnerVolumeSpecName "run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.013077 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-run-ovn\") pod \"caf986e7-b521-40fd-ae26-18716730d57d\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.014122 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/caf986e7-b521-40fd-ae26-18716730d57d-ovnkube-script-lib\") pod \"caf986e7-b521-40fd-ae26-18716730d57d\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.014148 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/caf986e7-b521-40fd-ae26-18716730d57d-env-overrides\") pod \"caf986e7-b521-40fd-ae26-18716730d57d\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.014179 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-var-lib-cni-networks-ovn-kubernetes\") pod \"caf986e7-b521-40fd-ae26-18716730d57d\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.014203 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-systemd-units\") pod \"caf986e7-b521-40fd-ae26-18716730d57d\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.014274 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-run-ovn-kubernetes\") pod \"caf986e7-b521-40fd-ae26-18716730d57d\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.014295 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9blq5\" (UniqueName: \"kubernetes.io/projected/caf986e7-b521-40fd-ae26-18716730d57d-kube-api-access-9blq5\") pod \"caf986e7-b521-40fd-ae26-18716730d57d\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.014316 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-log-socket\") pod \"caf986e7-b521-40fd-ae26-18716730d57d\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.014336 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-slash\") pod \"caf986e7-b521-40fd-ae26-18716730d57d\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.014358 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-run-openvswitch\") pod \"caf986e7-b521-40fd-ae26-18716730d57d\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.014382 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-cni-netd\") pod \"caf986e7-b521-40fd-ae26-18716730d57d\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.014411 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/caf986e7-b521-40fd-ae26-18716730d57d-ovnkube-config\") pod \"caf986e7-b521-40fd-ae26-18716730d57d\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.014429 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-node-log\") pod \"caf986e7-b521-40fd-ae26-18716730d57d\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.014473 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/caf986e7-b521-40fd-ae26-18716730d57d-ovn-node-metrics-cert\") pod \"caf986e7-b521-40fd-ae26-18716730d57d\" (UID: \"caf986e7-b521-40fd-ae26-18716730d57d\") " Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.014518 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-slash" (OuterVolumeSpecName: "host-slash") pod "caf986e7-b521-40fd-ae26-18716730d57d" (UID: "caf986e7-b521-40fd-ae26-18716730d57d"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.014550 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "caf986e7-b521-40fd-ae26-18716730d57d" (UID: "caf986e7-b521-40fd-ae26-18716730d57d"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.014569 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "caf986e7-b521-40fd-ae26-18716730d57d" (UID: "caf986e7-b521-40fd-ae26-18716730d57d"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.014457 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "caf986e7-b521-40fd-ae26-18716730d57d" (UID: "caf986e7-b521-40fd-ae26-18716730d57d"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.014696 5048 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-kubelet\") on node \"crc\" DevicePath \"\"" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.014714 5048 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.014722 5048 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-run-netns\") on node \"crc\" DevicePath \"\"" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.014730 5048 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-cni-bin\") on node \"crc\" DevicePath \"\"" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.014738 5048 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.014746 5048 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-run-ovn\") on node \"crc\" DevicePath \"\"" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.014755 5048 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-systemd-units\") on node \"crc\" DevicePath \"\"" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.014763 5048 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.014771 5048 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-slash\") on node \"crc\" DevicePath \"\"" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.014775 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/caf986e7-b521-40fd-ae26-18716730d57d-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "caf986e7-b521-40fd-ae26-18716730d57d" (UID: "caf986e7-b521-40fd-ae26-18716730d57d"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.014801 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-node-log" (OuterVolumeSpecName: "node-log") pod "caf986e7-b521-40fd-ae26-18716730d57d" (UID: "caf986e7-b521-40fd-ae26-18716730d57d"). InnerVolumeSpecName "node-log". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.014818 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-log-socket" (OuterVolumeSpecName: "log-socket") pod "caf986e7-b521-40fd-ae26-18716730d57d" (UID: "caf986e7-b521-40fd-ae26-18716730d57d"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.014867 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "caf986e7-b521-40fd-ae26-18716730d57d" (UID: "caf986e7-b521-40fd-ae26-18716730d57d"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.014900 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "caf986e7-b521-40fd-ae26-18716730d57d" (UID: "caf986e7-b521-40fd-ae26-18716730d57d"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.015201 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/caf986e7-b521-40fd-ae26-18716730d57d-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "caf986e7-b521-40fd-ae26-18716730d57d" (UID: "caf986e7-b521-40fd-ae26-18716730d57d"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.015557 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/caf986e7-b521-40fd-ae26-18716730d57d-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "caf986e7-b521-40fd-ae26-18716730d57d" (UID: "caf986e7-b521-40fd-ae26-18716730d57d"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.021256 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/caf986e7-b521-40fd-ae26-18716730d57d-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "caf986e7-b521-40fd-ae26-18716730d57d" (UID: "caf986e7-b521-40fd-ae26-18716730d57d"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.023944 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/caf986e7-b521-40fd-ae26-18716730d57d-kube-api-access-9blq5" (OuterVolumeSpecName: "kube-api-access-9blq5") pod "caf986e7-b521-40fd-ae26-18716730d57d" (UID: "caf986e7-b521-40fd-ae26-18716730d57d"). InnerVolumeSpecName "kube-api-access-9blq5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.039108 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "caf986e7-b521-40fd-ae26-18716730d57d" (UID: "caf986e7-b521-40fd-ae26-18716730d57d"). 
InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.059550 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-88bmh"] Dec 13 06:41:40 crc kubenswrapper[5048]: E1213 06:41:40.059916 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="ovnkube-controller" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.059952 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="ovnkube-controller" Dec 13 06:41:40 crc kubenswrapper[5048]: E1213 06:41:40.059963 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="kube-rbac-proxy-ovn-metrics" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.059973 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="kube-rbac-proxy-ovn-metrics" Dec 13 06:41:40 crc kubenswrapper[5048]: E1213 06:41:40.059987 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="ovnkube-controller" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.059995 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="ovnkube-controller" Dec 13 06:41:40 crc kubenswrapper[5048]: E1213 06:41:40.060005 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="nbdb" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.060013 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="nbdb" Dec 13 06:41:40 crc kubenswrapper[5048]: E1213 06:41:40.060024 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="ovn-controller" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.060034 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="ovn-controller" Dec 13 06:41:40 crc kubenswrapper[5048]: E1213 06:41:40.060044 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="kube-rbac-proxy-node" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.060054 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="kube-rbac-proxy-node" Dec 13 06:41:40 crc kubenswrapper[5048]: E1213 06:41:40.060069 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="ovnkube-controller" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.060077 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="ovnkube-controller" Dec 13 06:41:40 crc kubenswrapper[5048]: E1213 06:41:40.060086 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="sbdb" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.060094 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="sbdb" Dec 13 06:41:40 crc kubenswrapper[5048]: E1213 06:41:40.060105 5048 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="northd" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.060113 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="northd" Dec 13 06:41:40 crc kubenswrapper[5048]: E1213 06:41:40.060124 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="ovn-acl-logging" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.060131 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="ovn-acl-logging" Dec 13 06:41:40 crc kubenswrapper[5048]: E1213 06:41:40.060141 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="kubecfg-setup" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.060149 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="kubecfg-setup" Dec 13 06:41:40 crc kubenswrapper[5048]: E1213 06:41:40.060163 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="ovnkube-controller" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.060169 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="ovnkube-controller" Dec 13 06:41:40 crc kubenswrapper[5048]: E1213 06:41:40.060181 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="ovnkube-controller" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.060191 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="ovnkube-controller" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.061018 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="ovnkube-controller" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.061045 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="nbdb" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.061059 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="kube-rbac-proxy-node" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.061068 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="sbdb" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.061078 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="ovnkube-controller" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.061084 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="kube-rbac-proxy-ovn-metrics" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.061095 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="northd" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.061103 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="ovn-acl-logging" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.061111 5048 
memory_manager.go:354] "RemoveStaleState removing state" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="ovn-controller" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.061120 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="ovnkube-controller" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.061343 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="ovnkube-controller" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.061365 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="caf986e7-b521-40fd-ae26-18716730d57d" containerName="ovnkube-controller" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.063509 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.115904 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-host-run-netns\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.115968 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/4820daf9-7be7-4cd9-8473-44aaf0af242e-ovn-node-metrics-cert\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.115993 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-host-cni-bin\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.116078 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-host-cni-netd\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.116111 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-node-log\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.116132 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/4820daf9-7be7-4cd9-8473-44aaf0af242e-ovnkube-config\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.116159 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7k9kl\" (UniqueName: 
\"kubernetes.io/projected/4820daf9-7be7-4cd9-8473-44aaf0af242e-kube-api-access-7k9kl\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.116230 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-run-systemd\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.116284 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/4820daf9-7be7-4cd9-8473-44aaf0af242e-env-overrides\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.116305 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-var-lib-openvswitch\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.116365 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-run-ovn\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.116430 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-host-run-ovn-kubernetes\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.116546 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.116574 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-host-slash\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.116596 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-etc-openvswitch\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.116650 
5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-host-kubelet\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.116678 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-log-socket\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.116741 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-run-openvswitch\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.116805 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-systemd-units\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.116853 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/4820daf9-7be7-4cd9-8473-44aaf0af242e-ovnkube-script-lib\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.116940 5048 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/caf986e7-b521-40fd-ae26-18716730d57d-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.116961 5048 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/caf986e7-b521-40fd-ae26-18716730d57d-env-overrides\") on node \"crc\" DevicePath \"\"" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.116975 5048 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.116989 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9blq5\" (UniqueName: \"kubernetes.io/projected/caf986e7-b521-40fd-ae26-18716730d57d-kube-api-access-9blq5\") on node \"crc\" DevicePath \"\"" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.116999 5048 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-log-socket\") on node \"crc\" DevicePath \"\"" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.117008 5048 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-run-openvswitch\") on node \"crc\" DevicePath \"\"" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.117016 5048 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-host-cni-netd\") on node \"crc\" DevicePath \"\"" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.117024 5048 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/caf986e7-b521-40fd-ae26-18716730d57d-ovnkube-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.117033 5048 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-node-log\") on node \"crc\" DevicePath \"\"" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.117041 5048 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/caf986e7-b521-40fd-ae26-18716730d57d-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.117051 5048 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/caf986e7-b521-40fd-ae26-18716730d57d-run-systemd\") on node \"crc\" DevicePath \"\"" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.218304 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-systemd-units\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.218397 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/4820daf9-7be7-4cd9-8473-44aaf0af242e-ovnkube-script-lib\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.218432 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-host-run-netns\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.218454 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/4820daf9-7be7-4cd9-8473-44aaf0af242e-ovn-node-metrics-cert\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.218498 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-host-cni-bin\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.218531 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: 
\"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-host-cni-netd\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.218557 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-node-log\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.218591 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/4820daf9-7be7-4cd9-8473-44aaf0af242e-ovnkube-config\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.218623 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7k9kl\" (UniqueName: \"kubernetes.io/projected/4820daf9-7be7-4cd9-8473-44aaf0af242e-kube-api-access-7k9kl\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.218658 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-run-systemd\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.218683 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/4820daf9-7be7-4cd9-8473-44aaf0af242e-env-overrides\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.218694 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-host-run-netns\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.218715 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-var-lib-openvswitch\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.218776 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-var-lib-openvswitch\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.218836 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-host-cni-bin\") pod \"ovnkube-node-88bmh\" (UID: 
\"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.218874 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-host-cni-netd\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.218873 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-run-ovn\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.218900 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-node-log\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.218946 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-host-run-ovn-kubernetes\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.218974 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.219007 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-host-slash\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.219029 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-etc-openvswitch\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.219079 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-host-kubelet\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.219125 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-log-socket\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.219164 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-run-openvswitch\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.219236 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-run-systemd\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.219320 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-run-openvswitch\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.219361 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-run-ovn\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.219391 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-host-run-ovn-kubernetes\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.219418 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.219470 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-host-slash\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.219499 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-etc-openvswitch\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.219528 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-host-kubelet\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.219555 5048 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-log-socket\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.220344 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/4820daf9-7be7-4cd9-8473-44aaf0af242e-ovnkube-config\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.220444 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/4820daf9-7be7-4cd9-8473-44aaf0af242e-env-overrides\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.222891 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/4820daf9-7be7-4cd9-8473-44aaf0af242e-ovn-node-metrics-cert\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.243400 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7k9kl\" (UniqueName: \"kubernetes.io/projected/4820daf9-7be7-4cd9-8473-44aaf0af242e-kube-api-access-7k9kl\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.244515 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/4820daf9-7be7-4cd9-8473-44aaf0af242e-systemd-units\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.245060 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/4820daf9-7be7-4cd9-8473-44aaf0af242e-ovnkube-script-lib\") pod \"ovnkube-node-88bmh\" (UID: \"4820daf9-7be7-4cd9-8473-44aaf0af242e\") " pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.379365 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:40 crc kubenswrapper[5048]: W1213 06:41:40.419489 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4820daf9_7be7_4cd9_8473_44aaf0af242e.slice/crio-882632a7470775f8040d03996f7c8d15fdc46ceaa2083d23a70e3a380cbe02d8 WatchSource:0}: Error finding container 882632a7470775f8040d03996f7c8d15fdc46ceaa2083d23a70e3a380cbe02d8: Status 404 returned error can't find the container with id 882632a7470775f8040d03996f7c8d15fdc46ceaa2083d23a70e3a380cbe02d8 Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.800127 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-r42c6_627477f3-8fca-4b40-ace9-68d22f6b8576/kube-multus/2.log" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.800225 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-r42c6" event={"ID":"627477f3-8fca-4b40-ace9-68d22f6b8576","Type":"ContainerStarted","Data":"44534b0b7aa76c22b725583a39668eb1b13949a1d475992f3bdf64111724db34"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.803346 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hfgcf_caf986e7-b521-40fd-ae26-18716730d57d/ovnkube-controller/3.log" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.805756 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hfgcf_caf986e7-b521-40fd-ae26-18716730d57d/ovn-acl-logging/0.log" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.806195 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hfgcf_caf986e7-b521-40fd-ae26-18716730d57d/ovn-controller/0.log" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.806613 5048 generic.go:334] "Generic (PLEG): container finished" podID="caf986e7-b521-40fd-ae26-18716730d57d" containerID="0a177f1f0d21c69f5e5d3f6754642d257a3183cd0c5c26becbe2f0a4d2decc7a" exitCode=0 Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.806640 5048 generic.go:334] "Generic (PLEG): container finished" podID="caf986e7-b521-40fd-ae26-18716730d57d" containerID="584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b" exitCode=0 Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.806650 5048 generic.go:334] "Generic (PLEG): container finished" podID="caf986e7-b521-40fd-ae26-18716730d57d" containerID="4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f" exitCode=0 Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.806659 5048 generic.go:334] "Generic (PLEG): container finished" podID="caf986e7-b521-40fd-ae26-18716730d57d" containerID="28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767" exitCode=0 Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.806659 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" event={"ID":"caf986e7-b521-40fd-ae26-18716730d57d","Type":"ContainerDied","Data":"0a177f1f0d21c69f5e5d3f6754642d257a3183cd0c5c26becbe2f0a4d2decc7a"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.806669 5048 generic.go:334] "Generic (PLEG): container finished" podID="caf986e7-b521-40fd-ae26-18716730d57d" containerID="ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541" exitCode=0 Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.806713 5048 scope.go:117] "RemoveContainer" 
containerID="0a177f1f0d21c69f5e5d3f6754642d257a3183cd0c5c26becbe2f0a4d2decc7a" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.806723 5048 generic.go:334] "Generic (PLEG): container finished" podID="caf986e7-b521-40fd-ae26-18716730d57d" containerID="c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8" exitCode=0 Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.806740 5048 generic.go:334] "Generic (PLEG): container finished" podID="caf986e7-b521-40fd-ae26-18716730d57d" containerID="82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f" exitCode=143 Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.806751 5048 generic.go:334] "Generic (PLEG): container finished" podID="caf986e7-b521-40fd-ae26-18716730d57d" containerID="61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87" exitCode=143 Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.806754 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.806697 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" event={"ID":"caf986e7-b521-40fd-ae26-18716730d57d","Type":"ContainerDied","Data":"584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.806801 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" event={"ID":"caf986e7-b521-40fd-ae26-18716730d57d","Type":"ContainerDied","Data":"4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.806834 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" event={"ID":"caf986e7-b521-40fd-ae26-18716730d57d","Type":"ContainerDied","Data":"28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.806849 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" event={"ID":"caf986e7-b521-40fd-ae26-18716730d57d","Type":"ContainerDied","Data":"ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.806861 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" event={"ID":"caf986e7-b521-40fd-ae26-18716730d57d","Type":"ContainerDied","Data":"c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.806875 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.806887 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.806894 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.806901 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.806910 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.806917 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.806924 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.806930 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.806937 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.806947 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" event={"ID":"caf986e7-b521-40fd-ae26-18716730d57d","Type":"ContainerDied","Data":"82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.806958 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0a177f1f0d21c69f5e5d3f6754642d257a3183cd0c5c26becbe2f0a4d2decc7a"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.806966 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.806972 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.806979 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.806985 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.806992 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.806998 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.807005 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.807011 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.807018 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.807028 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" event={"ID":"caf986e7-b521-40fd-ae26-18716730d57d","Type":"ContainerDied","Data":"61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.807037 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0a177f1f0d21c69f5e5d3f6754642d257a3183cd0c5c26becbe2f0a4d2decc7a"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.807047 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.807055 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.807062 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.807069 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.807076 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.807083 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.807089 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.807096 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.807103 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.807115 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-hfgcf" event={"ID":"caf986e7-b521-40fd-ae26-18716730d57d","Type":"ContainerDied","Data":"6c40d792533fafdf709bb32a6297988b37deba8d255553e27dd630c6b82eeec7"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.807126 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0a177f1f0d21c69f5e5d3f6754642d257a3183cd0c5c26becbe2f0a4d2decc7a"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.807137 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.807144 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.807151 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.807158 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.807164 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.807170 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.807177 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.807183 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.807189 5048 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.809157 5048 generic.go:334] "Generic (PLEG): container finished" podID="4820daf9-7be7-4cd9-8473-44aaf0af242e" containerID="8ca82c21bef210866bfb4fb7846c8cfad135482373d7d0a4237764ad37b43505" exitCode=0 Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.809182 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" event={"ID":"4820daf9-7be7-4cd9-8473-44aaf0af242e","Type":"ContainerDied","Data":"8ca82c21bef210866bfb4fb7846c8cfad135482373d7d0a4237764ad37b43505"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.809233 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" 
event={"ID":"4820daf9-7be7-4cd9-8473-44aaf0af242e","Type":"ContainerStarted","Data":"882632a7470775f8040d03996f7c8d15fdc46ceaa2083d23a70e3a380cbe02d8"} Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.844220 5048 scope.go:117] "RemoveContainer" containerID="4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.875935 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-hfgcf"] Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.877009 5048 scope.go:117] "RemoveContainer" containerID="584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.879388 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-hfgcf"] Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.907535 5048 scope.go:117] "RemoveContainer" containerID="4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.921751 5048 scope.go:117] "RemoveContainer" containerID="28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.934550 5048 scope.go:117] "RemoveContainer" containerID="ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.948032 5048 scope.go:117] "RemoveContainer" containerID="c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.961416 5048 scope.go:117] "RemoveContainer" containerID="82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f" Dec 13 06:41:40 crc kubenswrapper[5048]: I1213 06:41:40.982395 5048 scope.go:117] "RemoveContainer" containerID="61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.010197 5048 scope.go:117] "RemoveContainer" containerID="1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.032720 5048 scope.go:117] "RemoveContainer" containerID="0a177f1f0d21c69f5e5d3f6754642d257a3183cd0c5c26becbe2f0a4d2decc7a" Dec 13 06:41:41 crc kubenswrapper[5048]: E1213 06:41:41.041619 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0a177f1f0d21c69f5e5d3f6754642d257a3183cd0c5c26becbe2f0a4d2decc7a\": container with ID starting with 0a177f1f0d21c69f5e5d3f6754642d257a3183cd0c5c26becbe2f0a4d2decc7a not found: ID does not exist" containerID="0a177f1f0d21c69f5e5d3f6754642d257a3183cd0c5c26becbe2f0a4d2decc7a" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.041676 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a177f1f0d21c69f5e5d3f6754642d257a3183cd0c5c26becbe2f0a4d2decc7a"} err="failed to get container status \"0a177f1f0d21c69f5e5d3f6754642d257a3183cd0c5c26becbe2f0a4d2decc7a\": rpc error: code = NotFound desc = could not find container \"0a177f1f0d21c69f5e5d3f6754642d257a3183cd0c5c26becbe2f0a4d2decc7a\": container with ID starting with 0a177f1f0d21c69f5e5d3f6754642d257a3183cd0c5c26becbe2f0a4d2decc7a not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.041711 5048 scope.go:117] "RemoveContainer" containerID="4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2" Dec 13 06:41:41 crc 
kubenswrapper[5048]: E1213 06:41:41.042081 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2\": container with ID starting with 4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2 not found: ID does not exist" containerID="4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.042122 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2"} err="failed to get container status \"4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2\": rpc error: code = NotFound desc = could not find container \"4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2\": container with ID starting with 4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2 not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.042153 5048 scope.go:117] "RemoveContainer" containerID="584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b" Dec 13 06:41:41 crc kubenswrapper[5048]: E1213 06:41:41.042429 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b\": container with ID starting with 584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b not found: ID does not exist" containerID="584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.042702 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b"} err="failed to get container status \"584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b\": rpc error: code = NotFound desc = could not find container \"584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b\": container with ID starting with 584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.042954 5048 scope.go:117] "RemoveContainer" containerID="4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f" Dec 13 06:41:41 crc kubenswrapper[5048]: E1213 06:41:41.043259 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f\": container with ID starting with 4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f not found: ID does not exist" containerID="4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.043282 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f"} err="failed to get container status \"4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f\": rpc error: code = NotFound desc = could not find container \"4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f\": container with ID starting with 4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: 
I1213 06:41:41.043300 5048 scope.go:117] "RemoveContainer" containerID="28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767" Dec 13 06:41:41 crc kubenswrapper[5048]: E1213 06:41:41.043573 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767\": container with ID starting with 28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767 not found: ID does not exist" containerID="28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.043597 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767"} err="failed to get container status \"28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767\": rpc error: code = NotFound desc = could not find container \"28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767\": container with ID starting with 28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767 not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.043614 5048 scope.go:117] "RemoveContainer" containerID="ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541" Dec 13 06:41:41 crc kubenswrapper[5048]: E1213 06:41:41.044145 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541\": container with ID starting with ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541 not found: ID does not exist" containerID="ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.044220 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541"} err="failed to get container status \"ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541\": rpc error: code = NotFound desc = could not find container \"ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541\": container with ID starting with ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541 not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.044245 5048 scope.go:117] "RemoveContainer" containerID="c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8" Dec 13 06:41:41 crc kubenswrapper[5048]: E1213 06:41:41.047673 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8\": container with ID starting with c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8 not found: ID does not exist" containerID="c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.047722 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8"} err="failed to get container status \"c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8\": rpc error: code = NotFound desc = could not find container \"c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8\": container 
with ID starting with c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8 not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.047744 5048 scope.go:117] "RemoveContainer" containerID="82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f" Dec 13 06:41:41 crc kubenswrapper[5048]: E1213 06:41:41.048116 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f\": container with ID starting with 82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f not found: ID does not exist" containerID="82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.048148 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f"} err="failed to get container status \"82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f\": rpc error: code = NotFound desc = could not find container \"82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f\": container with ID starting with 82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.048165 5048 scope.go:117] "RemoveContainer" containerID="61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87" Dec 13 06:41:41 crc kubenswrapper[5048]: E1213 06:41:41.048698 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87\": container with ID starting with 61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87 not found: ID does not exist" containerID="61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.048724 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87"} err="failed to get container status \"61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87\": rpc error: code = NotFound desc = could not find container \"61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87\": container with ID starting with 61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87 not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.048742 5048 scope.go:117] "RemoveContainer" containerID="1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df" Dec 13 06:41:41 crc kubenswrapper[5048]: E1213 06:41:41.049137 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\": container with ID starting with 1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df not found: ID does not exist" containerID="1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.049165 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df"} err="failed to get container status 
\"1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\": rpc error: code = NotFound desc = could not find container \"1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\": container with ID starting with 1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.049186 5048 scope.go:117] "RemoveContainer" containerID="0a177f1f0d21c69f5e5d3f6754642d257a3183cd0c5c26becbe2f0a4d2decc7a" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.049565 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a177f1f0d21c69f5e5d3f6754642d257a3183cd0c5c26becbe2f0a4d2decc7a"} err="failed to get container status \"0a177f1f0d21c69f5e5d3f6754642d257a3183cd0c5c26becbe2f0a4d2decc7a\": rpc error: code = NotFound desc = could not find container \"0a177f1f0d21c69f5e5d3f6754642d257a3183cd0c5c26becbe2f0a4d2decc7a\": container with ID starting with 0a177f1f0d21c69f5e5d3f6754642d257a3183cd0c5c26becbe2f0a4d2decc7a not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.049588 5048 scope.go:117] "RemoveContainer" containerID="4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.049880 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2"} err="failed to get container status \"4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2\": rpc error: code = NotFound desc = could not find container \"4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2\": container with ID starting with 4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2 not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.049901 5048 scope.go:117] "RemoveContainer" containerID="584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.050247 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b"} err="failed to get container status \"584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b\": rpc error: code = NotFound desc = could not find container \"584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b\": container with ID starting with 584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.050271 5048 scope.go:117] "RemoveContainer" containerID="4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.050551 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f"} err="failed to get container status \"4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f\": rpc error: code = NotFound desc = could not find container \"4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f\": container with ID starting with 4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.050573 5048 scope.go:117] "RemoveContainer" 
containerID="28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.050888 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767"} err="failed to get container status \"28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767\": rpc error: code = NotFound desc = could not find container \"28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767\": container with ID starting with 28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767 not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.050913 5048 scope.go:117] "RemoveContainer" containerID="ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.051291 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541"} err="failed to get container status \"ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541\": rpc error: code = NotFound desc = could not find container \"ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541\": container with ID starting with ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541 not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.051315 5048 scope.go:117] "RemoveContainer" containerID="c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.051788 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8"} err="failed to get container status \"c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8\": rpc error: code = NotFound desc = could not find container \"c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8\": container with ID starting with c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8 not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.051824 5048 scope.go:117] "RemoveContainer" containerID="82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.052215 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f"} err="failed to get container status \"82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f\": rpc error: code = NotFound desc = could not find container \"82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f\": container with ID starting with 82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.052236 5048 scope.go:117] "RemoveContainer" containerID="61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.052582 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87"} err="failed to get container status \"61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87\": rpc error: code = NotFound desc = could not find 
container \"61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87\": container with ID starting with 61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87 not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.052603 5048 scope.go:117] "RemoveContainer" containerID="1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.052850 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df"} err="failed to get container status \"1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\": rpc error: code = NotFound desc = could not find container \"1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\": container with ID starting with 1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.052869 5048 scope.go:117] "RemoveContainer" containerID="0a177f1f0d21c69f5e5d3f6754642d257a3183cd0c5c26becbe2f0a4d2decc7a" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.053215 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a177f1f0d21c69f5e5d3f6754642d257a3183cd0c5c26becbe2f0a4d2decc7a"} err="failed to get container status \"0a177f1f0d21c69f5e5d3f6754642d257a3183cd0c5c26becbe2f0a4d2decc7a\": rpc error: code = NotFound desc = could not find container \"0a177f1f0d21c69f5e5d3f6754642d257a3183cd0c5c26becbe2f0a4d2decc7a\": container with ID starting with 0a177f1f0d21c69f5e5d3f6754642d257a3183cd0c5c26becbe2f0a4d2decc7a not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.053238 5048 scope.go:117] "RemoveContainer" containerID="4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.053483 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2"} err="failed to get container status \"4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2\": rpc error: code = NotFound desc = could not find container \"4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2\": container with ID starting with 4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2 not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.053505 5048 scope.go:117] "RemoveContainer" containerID="584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.054591 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b"} err="failed to get container status \"584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b\": rpc error: code = NotFound desc = could not find container \"584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b\": container with ID starting with 584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.054618 5048 scope.go:117] "RemoveContainer" containerID="4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.054925 5048 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f"} err="failed to get container status \"4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f\": rpc error: code = NotFound desc = could not find container \"4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f\": container with ID starting with 4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.054950 5048 scope.go:117] "RemoveContainer" containerID="28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.055260 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767"} err="failed to get container status \"28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767\": rpc error: code = NotFound desc = could not find container \"28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767\": container with ID starting with 28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767 not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.055284 5048 scope.go:117] "RemoveContainer" containerID="ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.055695 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541"} err="failed to get container status \"ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541\": rpc error: code = NotFound desc = could not find container \"ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541\": container with ID starting with ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541 not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.055721 5048 scope.go:117] "RemoveContainer" containerID="c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.057073 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8"} err="failed to get container status \"c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8\": rpc error: code = NotFound desc = could not find container \"c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8\": container with ID starting with c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8 not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.057107 5048 scope.go:117] "RemoveContainer" containerID="82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.057615 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f"} err="failed to get container status \"82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f\": rpc error: code = NotFound desc = could not find container \"82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f\": container with ID starting with 
82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.057640 5048 scope.go:117] "RemoveContainer" containerID="61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.058080 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87"} err="failed to get container status \"61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87\": rpc error: code = NotFound desc = could not find container \"61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87\": container with ID starting with 61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87 not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.058107 5048 scope.go:117] "RemoveContainer" containerID="1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.058499 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df"} err="failed to get container status \"1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\": rpc error: code = NotFound desc = could not find container \"1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\": container with ID starting with 1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.058523 5048 scope.go:117] "RemoveContainer" containerID="0a177f1f0d21c69f5e5d3f6754642d257a3183cd0c5c26becbe2f0a4d2decc7a" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.058753 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a177f1f0d21c69f5e5d3f6754642d257a3183cd0c5c26becbe2f0a4d2decc7a"} err="failed to get container status \"0a177f1f0d21c69f5e5d3f6754642d257a3183cd0c5c26becbe2f0a4d2decc7a\": rpc error: code = NotFound desc = could not find container \"0a177f1f0d21c69f5e5d3f6754642d257a3183cd0c5c26becbe2f0a4d2decc7a\": container with ID starting with 0a177f1f0d21c69f5e5d3f6754642d257a3183cd0c5c26becbe2f0a4d2decc7a not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.058769 5048 scope.go:117] "RemoveContainer" containerID="4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.059012 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2"} err="failed to get container status \"4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2\": rpc error: code = NotFound desc = could not find container \"4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2\": container with ID starting with 4898d8329d686b48431a6584164acb763d1f5890962ea21bc4c31f4e435233c2 not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.059037 5048 scope.go:117] "RemoveContainer" containerID="584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.059227 5048 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b"} err="failed to get container status \"584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b\": rpc error: code = NotFound desc = could not find container \"584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b\": container with ID starting with 584ea10d2a0bb1a9a3d8fba4052477f6deb3c8ef24dc09b44f2fc21140e7425b not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.059243 5048 scope.go:117] "RemoveContainer" containerID="4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.059649 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f"} err="failed to get container status \"4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f\": rpc error: code = NotFound desc = could not find container \"4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f\": container with ID starting with 4063c8378f9c9ffe9718544ef2f38897860fa4cf390ea05280c66a359716730f not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.059673 5048 scope.go:117] "RemoveContainer" containerID="28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.059926 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767"} err="failed to get container status \"28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767\": rpc error: code = NotFound desc = could not find container \"28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767\": container with ID starting with 28a89bb17bf2563cb1b81ba7e306e3ab0cd3a936feec27450434188461840767 not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.059950 5048 scope.go:117] "RemoveContainer" containerID="ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.060192 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541"} err="failed to get container status \"ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541\": rpc error: code = NotFound desc = could not find container \"ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541\": container with ID starting with ee6900f4052a54170ffba47984ca763c21601a6039309c6a1327f8ef43677541 not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.060212 5048 scope.go:117] "RemoveContainer" containerID="c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.060442 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8"} err="failed to get container status \"c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8\": rpc error: code = NotFound desc = could not find container \"c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8\": container with ID starting with c03acaaa3bbb4d97bc14100f8670abe2b18a0ec9be72f01a418fc57c59c20fa8 not found: ID does not exist" Dec 
13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.060477 5048 scope.go:117] "RemoveContainer" containerID="82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.060699 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f"} err="failed to get container status \"82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f\": rpc error: code = NotFound desc = could not find container \"82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f\": container with ID starting with 82a17b3ada375570b51d21e0fa16641943d16de570e5d783628fb5248413126f not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.060718 5048 scope.go:117] "RemoveContainer" containerID="61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.060933 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87"} err="failed to get container status \"61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87\": rpc error: code = NotFound desc = could not find container \"61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87\": container with ID starting with 61afa23153d8e210c3b2c31c4857fc227aca119b11c7073da9e3f26c237ddf87 not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.060954 5048 scope.go:117] "RemoveContainer" containerID="1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.061263 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df"} err="failed to get container status \"1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\": rpc error: code = NotFound desc = could not find container \"1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df\": container with ID starting with 1bef33549e0a82040b8ac6c4dc7d14d78010f00fc967c1314c459dd3681649df not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.061286 5048 scope.go:117] "RemoveContainer" containerID="0a177f1f0d21c69f5e5d3f6754642d257a3183cd0c5c26becbe2f0a4d2decc7a" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.061551 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a177f1f0d21c69f5e5d3f6754642d257a3183cd0c5c26becbe2f0a4d2decc7a"} err="failed to get container status \"0a177f1f0d21c69f5e5d3f6754642d257a3183cd0c5c26becbe2f0a4d2decc7a\": rpc error: code = NotFound desc = could not find container \"0a177f1f0d21c69f5e5d3f6754642d257a3183cd0c5c26becbe2f0a4d2decc7a\": container with ID starting with 0a177f1f0d21c69f5e5d3f6754642d257a3183cd0c5c26becbe2f0a4d2decc7a not found: ID does not exist" Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.820506 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" event={"ID":"4820daf9-7be7-4cd9-8473-44aaf0af242e","Type":"ContainerStarted","Data":"ed2255f265828a6af98a623e8f1d6fe21b5cf5a11d36166d3f518ec8031db121"} Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.821174 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" event={"ID":"4820daf9-7be7-4cd9-8473-44aaf0af242e","Type":"ContainerStarted","Data":"e8802e53fef3e1ab670fcbf48b30cbc15f578c58ef2432517fdc29d01f5d51aa"} Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.821198 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" event={"ID":"4820daf9-7be7-4cd9-8473-44aaf0af242e","Type":"ContainerStarted","Data":"98cf89b001588bb2a6fd9d4b74040d8e2b7632adfba025b7120507ca87a75948"} Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.821212 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" event={"ID":"4820daf9-7be7-4cd9-8473-44aaf0af242e","Type":"ContainerStarted","Data":"3aaf86801b388d18a213bbbeac4e711d3e7c39a9c35ad5380ee7934891cec770"} Dec 13 06:41:41 crc kubenswrapper[5048]: I1213 06:41:41.821223 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" event={"ID":"4820daf9-7be7-4cd9-8473-44aaf0af242e","Type":"ContainerStarted","Data":"0381f24c33152759078f1195229767a3325ff318b74b0d3cb51fa309b37312ee"} Dec 13 06:41:42 crc kubenswrapper[5048]: I1213 06:41:42.573867 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="caf986e7-b521-40fd-ae26-18716730d57d" path="/var/lib/kubelet/pods/caf986e7-b521-40fd-ae26-18716730d57d/volumes" Dec 13 06:41:42 crc kubenswrapper[5048]: I1213 06:41:42.830445 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" event={"ID":"4820daf9-7be7-4cd9-8473-44aaf0af242e","Type":"ContainerStarted","Data":"f1f3c7d099870cc4b605097bcca0bae1d239be8a37b419720af70378ffb23b31"} Dec 13 06:41:43 crc kubenswrapper[5048]: I1213 06:41:43.838813 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" event={"ID":"4820daf9-7be7-4cd9-8473-44aaf0af242e","Type":"ContainerStarted","Data":"0c573c5666f6f50a0b3d7786bca59c4c9202f88c018e43d155269944e8f9c256"} Dec 13 06:41:44 crc kubenswrapper[5048]: I1213 06:41:44.740814 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-m5zdt" Dec 13 06:41:45 crc kubenswrapper[5048]: I1213 06:41:45.852815 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" event={"ID":"4820daf9-7be7-4cd9-8473-44aaf0af242e","Type":"ContainerStarted","Data":"b29324ee8dc2a2c38363af10f5230f72d5a06f61698692e658c92b7dbb5dbe1b"} Dec 13 06:41:45 crc kubenswrapper[5048]: I1213 06:41:45.854114 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:45 crc kubenswrapper[5048]: I1213 06:41:45.854135 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:45 crc kubenswrapper[5048]: I1213 06:41:45.854143 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:45 crc kubenswrapper[5048]: I1213 06:41:45.881576 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:41:45 crc kubenswrapper[5048]: I1213 06:41:45.886626 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" podStartSLOduration=5.886604723 podStartE2EDuration="5.886604723s" 
podCreationTimestamp="2025-12-13 06:41:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:41:45.880622215 +0000 UTC m=+739.747216816" watchObservedRunningTime="2025-12-13 06:41:45.886604723 +0000 UTC m=+739.753199304" Dec 13 06:41:45 crc kubenswrapper[5048]: I1213 06:41:45.898057 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:42:09 crc kubenswrapper[5048]: I1213 06:42:09.672249 5048 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Dec 13 06:42:10 crc kubenswrapper[5048]: I1213 06:42:10.401728 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-88bmh" Dec 13 06:42:16 crc kubenswrapper[5048]: I1213 06:42:16.216668 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 13 06:42:16 crc kubenswrapper[5048]: I1213 06:42:16.218226 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 13 06:42:34 crc kubenswrapper[5048]: I1213 06:42:34.482559 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts"] Dec 13 06:42:34 crc kubenswrapper[5048]: I1213 06:42:34.483975 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts" Dec 13 06:42:34 crc kubenswrapper[5048]: I1213 06:42:34.485659 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Dec 13 06:42:34 crc kubenswrapper[5048]: I1213 06:42:34.491854 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts"] Dec 13 06:42:34 crc kubenswrapper[5048]: I1213 06:42:34.666513 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4f04fec8-b2a6-4634-b1af-0c47285bad86-bundle\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts\" (UID: \"4f04fec8-b2a6-4634-b1af-0c47285bad86\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts" Dec 13 06:42:34 crc kubenswrapper[5048]: I1213 06:42:34.666676 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4f04fec8-b2a6-4634-b1af-0c47285bad86-util\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts\" (UID: \"4f04fec8-b2a6-4634-b1af-0c47285bad86\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts" Dec 13 06:42:34 crc kubenswrapper[5048]: I1213 06:42:34.666711 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8x6rt\" (UniqueName: \"kubernetes.io/projected/4f04fec8-b2a6-4634-b1af-0c47285bad86-kube-api-access-8x6rt\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts\" (UID: \"4f04fec8-b2a6-4634-b1af-0c47285bad86\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts" Dec 13 06:42:34 crc kubenswrapper[5048]: I1213 06:42:34.767778 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4f04fec8-b2a6-4634-b1af-0c47285bad86-util\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts\" (UID: \"4f04fec8-b2a6-4634-b1af-0c47285bad86\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts" Dec 13 06:42:34 crc kubenswrapper[5048]: I1213 06:42:34.767823 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8x6rt\" (UniqueName: \"kubernetes.io/projected/4f04fec8-b2a6-4634-b1af-0c47285bad86-kube-api-access-8x6rt\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts\" (UID: \"4f04fec8-b2a6-4634-b1af-0c47285bad86\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts" Dec 13 06:42:34 crc kubenswrapper[5048]: I1213 06:42:34.767859 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4f04fec8-b2a6-4634-b1af-0c47285bad86-bundle\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts\" (UID: \"4f04fec8-b2a6-4634-b1af-0c47285bad86\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts" Dec 13 06:42:34 crc kubenswrapper[5048]: I1213 06:42:34.768502 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/4f04fec8-b2a6-4634-b1af-0c47285bad86-bundle\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts\" (UID: \"4f04fec8-b2a6-4634-b1af-0c47285bad86\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts" Dec 13 06:42:34 crc kubenswrapper[5048]: I1213 06:42:34.769941 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4f04fec8-b2a6-4634-b1af-0c47285bad86-util\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts\" (UID: \"4f04fec8-b2a6-4634-b1af-0c47285bad86\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts" Dec 13 06:42:34 crc kubenswrapper[5048]: I1213 06:42:34.790220 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8x6rt\" (UniqueName: \"kubernetes.io/projected/4f04fec8-b2a6-4634-b1af-0c47285bad86-kube-api-access-8x6rt\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts\" (UID: \"4f04fec8-b2a6-4634-b1af-0c47285bad86\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts" Dec 13 06:42:34 crc kubenswrapper[5048]: I1213 06:42:34.801764 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts" Dec 13 06:42:35 crc kubenswrapper[5048]: I1213 06:42:35.160754 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts"] Dec 13 06:42:36 crc kubenswrapper[5048]: I1213 06:42:36.122205 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts" event={"ID":"4f04fec8-b2a6-4634-b1af-0c47285bad86","Type":"ContainerStarted","Data":"7c4429a768241b5297547a128436f107243e581164da17519fa5949e4dc95f67"} Dec 13 06:42:36 crc kubenswrapper[5048]: I1213 06:42:36.122591 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts" event={"ID":"4f04fec8-b2a6-4634-b1af-0c47285bad86","Type":"ContainerStarted","Data":"cb8b403e4bb97dbc7be868e9696568e5ad797aaed7a7bd6fd0a5330d3302a962"} Dec 13 06:42:36 crc kubenswrapper[5048]: I1213 06:42:36.224300 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-nj8jx"] Dec 13 06:42:36 crc kubenswrapper[5048]: I1213 06:42:36.225292 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-nj8jx" Dec 13 06:42:36 crc kubenswrapper[5048]: I1213 06:42:36.233363 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nj8jx"] Dec 13 06:42:36 crc kubenswrapper[5048]: I1213 06:42:36.246556 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e8549450-c793-4fca-9f52-d7650d704a31-catalog-content\") pod \"redhat-operators-nj8jx\" (UID: \"e8549450-c793-4fca-9f52-d7650d704a31\") " pod="openshift-marketplace/redhat-operators-nj8jx" Dec 13 06:42:36 crc kubenswrapper[5048]: I1213 06:42:36.246625 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vw6pj\" (UniqueName: \"kubernetes.io/projected/e8549450-c793-4fca-9f52-d7650d704a31-kube-api-access-vw6pj\") pod \"redhat-operators-nj8jx\" (UID: \"e8549450-c793-4fca-9f52-d7650d704a31\") " pod="openshift-marketplace/redhat-operators-nj8jx" Dec 13 06:42:36 crc kubenswrapper[5048]: I1213 06:42:36.246708 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e8549450-c793-4fca-9f52-d7650d704a31-utilities\") pod \"redhat-operators-nj8jx\" (UID: \"e8549450-c793-4fca-9f52-d7650d704a31\") " pod="openshift-marketplace/redhat-operators-nj8jx" Dec 13 06:42:36 crc kubenswrapper[5048]: I1213 06:42:36.348354 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e8549450-c793-4fca-9f52-d7650d704a31-utilities\") pod \"redhat-operators-nj8jx\" (UID: \"e8549450-c793-4fca-9f52-d7650d704a31\") " pod="openshift-marketplace/redhat-operators-nj8jx" Dec 13 06:42:36 crc kubenswrapper[5048]: I1213 06:42:36.348460 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e8549450-c793-4fca-9f52-d7650d704a31-catalog-content\") pod \"redhat-operators-nj8jx\" (UID: \"e8549450-c793-4fca-9f52-d7650d704a31\") " pod="openshift-marketplace/redhat-operators-nj8jx" Dec 13 06:42:36 crc kubenswrapper[5048]: I1213 06:42:36.348493 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vw6pj\" (UniqueName: \"kubernetes.io/projected/e8549450-c793-4fca-9f52-d7650d704a31-kube-api-access-vw6pj\") pod \"redhat-operators-nj8jx\" (UID: \"e8549450-c793-4fca-9f52-d7650d704a31\") " pod="openshift-marketplace/redhat-operators-nj8jx" Dec 13 06:42:36 crc kubenswrapper[5048]: I1213 06:42:36.349243 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e8549450-c793-4fca-9f52-d7650d704a31-utilities\") pod \"redhat-operators-nj8jx\" (UID: \"e8549450-c793-4fca-9f52-d7650d704a31\") " pod="openshift-marketplace/redhat-operators-nj8jx" Dec 13 06:42:36 crc kubenswrapper[5048]: I1213 06:42:36.349338 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e8549450-c793-4fca-9f52-d7650d704a31-catalog-content\") pod \"redhat-operators-nj8jx\" (UID: \"e8549450-c793-4fca-9f52-d7650d704a31\") " pod="openshift-marketplace/redhat-operators-nj8jx" Dec 13 06:42:36 crc kubenswrapper[5048]: I1213 06:42:36.373881 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-vw6pj\" (UniqueName: \"kubernetes.io/projected/e8549450-c793-4fca-9f52-d7650d704a31-kube-api-access-vw6pj\") pod \"redhat-operators-nj8jx\" (UID: \"e8549450-c793-4fca-9f52-d7650d704a31\") " pod="openshift-marketplace/redhat-operators-nj8jx" Dec 13 06:42:36 crc kubenswrapper[5048]: I1213 06:42:36.634297 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nj8jx" Dec 13 06:42:37 crc kubenswrapper[5048]: I1213 06:42:37.143980 5048 generic.go:334] "Generic (PLEG): container finished" podID="4f04fec8-b2a6-4634-b1af-0c47285bad86" containerID="7c4429a768241b5297547a128436f107243e581164da17519fa5949e4dc95f67" exitCode=0 Dec 13 06:42:37 crc kubenswrapper[5048]: I1213 06:42:37.144035 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts" event={"ID":"4f04fec8-b2a6-4634-b1af-0c47285bad86","Type":"ContainerDied","Data":"7c4429a768241b5297547a128436f107243e581164da17519fa5949e4dc95f67"} Dec 13 06:42:37 crc kubenswrapper[5048]: I1213 06:42:37.151092 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nj8jx"] Dec 13 06:42:37 crc kubenswrapper[5048]: W1213 06:42:37.170802 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode8549450_c793_4fca_9f52_d7650d704a31.slice/crio-d7e5ab823fec0624c2eefa9ed8ab331bfb0c50f2ccd9b294b4cb1ba2324ace62 WatchSource:0}: Error finding container d7e5ab823fec0624c2eefa9ed8ab331bfb0c50f2ccd9b294b4cb1ba2324ace62: Status 404 returned error can't find the container with id d7e5ab823fec0624c2eefa9ed8ab331bfb0c50f2ccd9b294b4cb1ba2324ace62 Dec 13 06:42:38 crc kubenswrapper[5048]: I1213 06:42:38.149075 5048 generic.go:334] "Generic (PLEG): container finished" podID="e8549450-c793-4fca-9f52-d7650d704a31" containerID="9d8258c06505b2b3a13a526d3586aefc3e3f980860d920d689059e8e819c8d28" exitCode=0 Dec 13 06:42:38 crc kubenswrapper[5048]: I1213 06:42:38.149140 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nj8jx" event={"ID":"e8549450-c793-4fca-9f52-d7650d704a31","Type":"ContainerDied","Data":"9d8258c06505b2b3a13a526d3586aefc3e3f980860d920d689059e8e819c8d28"} Dec 13 06:42:38 crc kubenswrapper[5048]: I1213 06:42:38.149959 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nj8jx" event={"ID":"e8549450-c793-4fca-9f52-d7650d704a31","Type":"ContainerStarted","Data":"d7e5ab823fec0624c2eefa9ed8ab331bfb0c50f2ccd9b294b4cb1ba2324ace62"} Dec 13 06:42:39 crc kubenswrapper[5048]: I1213 06:42:39.156839 5048 generic.go:334] "Generic (PLEG): container finished" podID="4f04fec8-b2a6-4634-b1af-0c47285bad86" containerID="b1142ac3ac8e5bc05890304816fe0375420972a0775b0e84a2de58b17e69532c" exitCode=0 Dec 13 06:42:39 crc kubenswrapper[5048]: I1213 06:42:39.156944 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts" event={"ID":"4f04fec8-b2a6-4634-b1af-0c47285bad86","Type":"ContainerDied","Data":"b1142ac3ac8e5bc05890304816fe0375420972a0775b0e84a2de58b17e69532c"} Dec 13 06:42:39 crc kubenswrapper[5048]: I1213 06:42:39.159136 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nj8jx" 
event={"ID":"e8549450-c793-4fca-9f52-d7650d704a31","Type":"ContainerStarted","Data":"c21c4c20d67dc369508f48f431af9b8880f8736981aa72da096cc5ab3b002ae3"} Dec 13 06:42:40 crc kubenswrapper[5048]: I1213 06:42:40.167129 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts" event={"ID":"4f04fec8-b2a6-4634-b1af-0c47285bad86","Type":"ContainerStarted","Data":"1d92a89479800125cddaec63dfab0fa4bd892d82a809a5636cdf0589991c3f9c"} Dec 13 06:42:40 crc kubenswrapper[5048]: I1213 06:42:40.195939 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts" podStartSLOduration=4.465759415 podStartE2EDuration="6.195910516s" podCreationTimestamp="2025-12-13 06:42:34 +0000 UTC" firstStartedPulling="2025-12-13 06:42:37.148254955 +0000 UTC m=+791.014849536" lastFinishedPulling="2025-12-13 06:42:38.878406036 +0000 UTC m=+792.745000637" observedRunningTime="2025-12-13 06:42:40.192307964 +0000 UTC m=+794.058902625" watchObservedRunningTime="2025-12-13 06:42:40.195910516 +0000 UTC m=+794.062505147" Dec 13 06:42:41 crc kubenswrapper[5048]: I1213 06:42:41.177153 5048 generic.go:334] "Generic (PLEG): container finished" podID="4f04fec8-b2a6-4634-b1af-0c47285bad86" containerID="1d92a89479800125cddaec63dfab0fa4bd892d82a809a5636cdf0589991c3f9c" exitCode=0 Dec 13 06:42:41 crc kubenswrapper[5048]: I1213 06:42:41.177253 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts" event={"ID":"4f04fec8-b2a6-4634-b1af-0c47285bad86","Type":"ContainerDied","Data":"1d92a89479800125cddaec63dfab0fa4bd892d82a809a5636cdf0589991c3f9c"} Dec 13 06:42:42 crc kubenswrapper[5048]: I1213 06:42:42.818012 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts" Dec 13 06:42:42 crc kubenswrapper[5048]: I1213 06:42:42.960674 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8x6rt\" (UniqueName: \"kubernetes.io/projected/4f04fec8-b2a6-4634-b1af-0c47285bad86-kube-api-access-8x6rt\") pod \"4f04fec8-b2a6-4634-b1af-0c47285bad86\" (UID: \"4f04fec8-b2a6-4634-b1af-0c47285bad86\") " Dec 13 06:42:42 crc kubenswrapper[5048]: I1213 06:42:42.960772 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4f04fec8-b2a6-4634-b1af-0c47285bad86-util\") pod \"4f04fec8-b2a6-4634-b1af-0c47285bad86\" (UID: \"4f04fec8-b2a6-4634-b1af-0c47285bad86\") " Dec 13 06:42:42 crc kubenswrapper[5048]: I1213 06:42:42.960825 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4f04fec8-b2a6-4634-b1af-0c47285bad86-bundle\") pod \"4f04fec8-b2a6-4634-b1af-0c47285bad86\" (UID: \"4f04fec8-b2a6-4634-b1af-0c47285bad86\") " Dec 13 06:42:42 crc kubenswrapper[5048]: I1213 06:42:42.962038 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4f04fec8-b2a6-4634-b1af-0c47285bad86-bundle" (OuterVolumeSpecName: "bundle") pod "4f04fec8-b2a6-4634-b1af-0c47285bad86" (UID: "4f04fec8-b2a6-4634-b1af-0c47285bad86"). InnerVolumeSpecName "bundle". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:42:42 crc kubenswrapper[5048]: I1213 06:42:42.972002 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4f04fec8-b2a6-4634-b1af-0c47285bad86-util" (OuterVolumeSpecName: "util") pod "4f04fec8-b2a6-4634-b1af-0c47285bad86" (UID: "4f04fec8-b2a6-4634-b1af-0c47285bad86"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:42:42 crc kubenswrapper[5048]: I1213 06:42:42.972677 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f04fec8-b2a6-4634-b1af-0c47285bad86-kube-api-access-8x6rt" (OuterVolumeSpecName: "kube-api-access-8x6rt") pod "4f04fec8-b2a6-4634-b1af-0c47285bad86" (UID: "4f04fec8-b2a6-4634-b1af-0c47285bad86"). InnerVolumeSpecName "kube-api-access-8x6rt". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:42:43 crc kubenswrapper[5048]: I1213 06:42:43.090076 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8x6rt\" (UniqueName: \"kubernetes.io/projected/4f04fec8-b2a6-4634-b1af-0c47285bad86-kube-api-access-8x6rt\") on node \"crc\" DevicePath \"\"" Dec 13 06:42:43 crc kubenswrapper[5048]: I1213 06:42:43.090118 5048 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4f04fec8-b2a6-4634-b1af-0c47285bad86-util\") on node \"crc\" DevicePath \"\"" Dec 13 06:42:43 crc kubenswrapper[5048]: I1213 06:42:43.090129 5048 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4f04fec8-b2a6-4634-b1af-0c47285bad86-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 06:42:43 crc kubenswrapper[5048]: I1213 06:42:43.191421 5048 generic.go:334] "Generic (PLEG): container finished" podID="e8549450-c793-4fca-9f52-d7650d704a31" containerID="c21c4c20d67dc369508f48f431af9b8880f8736981aa72da096cc5ab3b002ae3" exitCode=0 Dec 13 06:42:43 crc kubenswrapper[5048]: I1213 06:42:43.191538 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nj8jx" event={"ID":"e8549450-c793-4fca-9f52-d7650d704a31","Type":"ContainerDied","Data":"c21c4c20d67dc369508f48f431af9b8880f8736981aa72da096cc5ab3b002ae3"} Dec 13 06:42:43 crc kubenswrapper[5048]: I1213 06:42:43.196775 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts" event={"ID":"4f04fec8-b2a6-4634-b1af-0c47285bad86","Type":"ContainerDied","Data":"cb8b403e4bb97dbc7be868e9696568e5ad797aaed7a7bd6fd0a5330d3302a962"} Dec 13 06:42:43 crc kubenswrapper[5048]: I1213 06:42:43.196887 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts" Dec 13 06:42:43 crc kubenswrapper[5048]: I1213 06:42:43.197019 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cb8b403e4bb97dbc7be868e9696568e5ad797aaed7a7bd6fd0a5330d3302a962" Dec 13 06:42:44 crc kubenswrapper[5048]: I1213 06:42:44.209288 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nj8jx" event={"ID":"e8549450-c793-4fca-9f52-d7650d704a31","Type":"ContainerStarted","Data":"6713eee8dbf281cd953ac042918461fccfcb0596d899c92f08502bdc9b609084"} Dec 13 06:42:44 crc kubenswrapper[5048]: I1213 06:42:44.321221 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-nj8jx" podStartSLOduration=2.794464092 podStartE2EDuration="8.321205525s" podCreationTimestamp="2025-12-13 06:42:36 +0000 UTC" firstStartedPulling="2025-12-13 06:42:38.150993652 +0000 UTC m=+792.017588233" lastFinishedPulling="2025-12-13 06:42:43.677735085 +0000 UTC m=+797.544329666" observedRunningTime="2025-12-13 06:42:44.24499309 +0000 UTC m=+798.111587741" watchObservedRunningTime="2025-12-13 06:42:44.321205525 +0000 UTC m=+798.187800096" Dec 13 06:42:44 crc kubenswrapper[5048]: I1213 06:42:44.323403 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-6769fb99d-lk9l5"] Dec 13 06:42:44 crc kubenswrapper[5048]: E1213 06:42:44.323651 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f04fec8-b2a6-4634-b1af-0c47285bad86" containerName="pull" Dec 13 06:42:44 crc kubenswrapper[5048]: I1213 06:42:44.323672 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f04fec8-b2a6-4634-b1af-0c47285bad86" containerName="pull" Dec 13 06:42:44 crc kubenswrapper[5048]: E1213 06:42:44.323682 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f04fec8-b2a6-4634-b1af-0c47285bad86" containerName="extract" Dec 13 06:42:44 crc kubenswrapper[5048]: I1213 06:42:44.323690 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f04fec8-b2a6-4634-b1af-0c47285bad86" containerName="extract" Dec 13 06:42:44 crc kubenswrapper[5048]: E1213 06:42:44.323708 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f04fec8-b2a6-4634-b1af-0c47285bad86" containerName="util" Dec 13 06:42:44 crc kubenswrapper[5048]: I1213 06:42:44.323717 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f04fec8-b2a6-4634-b1af-0c47285bad86" containerName="util" Dec 13 06:42:44 crc kubenswrapper[5048]: I1213 06:42:44.323822 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f04fec8-b2a6-4634-b1af-0c47285bad86" containerName="extract" Dec 13 06:42:44 crc kubenswrapper[5048]: I1213 06:42:44.324247 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-6769fb99d-lk9l5" Dec 13 06:42:44 crc kubenswrapper[5048]: I1213 06:42:44.326928 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-mfphz" Dec 13 06:42:44 crc kubenswrapper[5048]: I1213 06:42:44.327208 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Dec 13 06:42:44 crc kubenswrapper[5048]: I1213 06:42:44.334928 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Dec 13 06:42:44 crc kubenswrapper[5048]: I1213 06:42:44.339555 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-6769fb99d-lk9l5"] Dec 13 06:42:44 crc kubenswrapper[5048]: I1213 06:42:44.505012 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tf89q\" (UniqueName: \"kubernetes.io/projected/ce50f1e5-0177-4f3d-a02a-5af653f70001-kube-api-access-tf89q\") pod \"nmstate-operator-6769fb99d-lk9l5\" (UID: \"ce50f1e5-0177-4f3d-a02a-5af653f70001\") " pod="openshift-nmstate/nmstate-operator-6769fb99d-lk9l5" Dec 13 06:42:44 crc kubenswrapper[5048]: I1213 06:42:44.605845 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tf89q\" (UniqueName: \"kubernetes.io/projected/ce50f1e5-0177-4f3d-a02a-5af653f70001-kube-api-access-tf89q\") pod \"nmstate-operator-6769fb99d-lk9l5\" (UID: \"ce50f1e5-0177-4f3d-a02a-5af653f70001\") " pod="openshift-nmstate/nmstate-operator-6769fb99d-lk9l5" Dec 13 06:42:44 crc kubenswrapper[5048]: I1213 06:42:44.651995 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tf89q\" (UniqueName: \"kubernetes.io/projected/ce50f1e5-0177-4f3d-a02a-5af653f70001-kube-api-access-tf89q\") pod \"nmstate-operator-6769fb99d-lk9l5\" (UID: \"ce50f1e5-0177-4f3d-a02a-5af653f70001\") " pod="openshift-nmstate/nmstate-operator-6769fb99d-lk9l5" Dec 13 06:42:44 crc kubenswrapper[5048]: I1213 06:42:44.942515 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-6769fb99d-lk9l5" Dec 13 06:42:45 crc kubenswrapper[5048]: I1213 06:42:45.309498 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-6769fb99d-lk9l5"] Dec 13 06:42:46 crc kubenswrapper[5048]: I1213 06:42:46.216056 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 13 06:42:46 crc kubenswrapper[5048]: I1213 06:42:46.216146 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 13 06:42:46 crc kubenswrapper[5048]: I1213 06:42:46.221788 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-6769fb99d-lk9l5" event={"ID":"ce50f1e5-0177-4f3d-a02a-5af653f70001","Type":"ContainerStarted","Data":"9056ea4b16e72f1bde3c46e4e1343369774f189da2dcd25f1eb56b0513bd739c"} Dec 13 06:42:46 crc kubenswrapper[5048]: I1213 06:42:46.634579 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-nj8jx" Dec 13 06:42:46 crc kubenswrapper[5048]: I1213 06:42:46.634647 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-nj8jx" Dec 13 06:42:47 crc kubenswrapper[5048]: I1213 06:42:47.899868 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-nj8jx" podUID="e8549450-c793-4fca-9f52-d7650d704a31" containerName="registry-server" probeResult="failure" output=< Dec 13 06:42:47 crc kubenswrapper[5048]: timeout: failed to connect service ":50051" within 1s Dec 13 06:42:47 crc kubenswrapper[5048]: > Dec 13 06:42:50 crc kubenswrapper[5048]: I1213 06:42:50.253880 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-6769fb99d-lk9l5" event={"ID":"ce50f1e5-0177-4f3d-a02a-5af653f70001","Type":"ContainerStarted","Data":"58e1978d3e01b87baec4bf1ad0f483273b07ca7ec42244e4624513b63934f398"} Dec 13 06:42:50 crc kubenswrapper[5048]: I1213 06:42:50.272629 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-6769fb99d-lk9l5" podStartSLOduration=2.190043037 podStartE2EDuration="6.272598228s" podCreationTimestamp="2025-12-13 06:42:44 +0000 UTC" firstStartedPulling="2025-12-13 06:42:45.33321387 +0000 UTC m=+799.199808451" lastFinishedPulling="2025-12-13 06:42:49.415769061 +0000 UTC m=+803.282363642" observedRunningTime="2025-12-13 06:42:50.269264752 +0000 UTC m=+804.135859333" watchObservedRunningTime="2025-12-13 06:42:50.272598228 +0000 UTC m=+804.139192809" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.347755 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-7f7f7578db-wvmhg"] Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.349845 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f7f7578db-wvmhg" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.351757 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-88m95" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.352010 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-f8fb84555-bswmw"] Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.352853 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-f8fb84555-bswmw" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.357573 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.364702 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f7f7578db-wvmhg"] Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.467410 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-b4xzd"] Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.468358 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-f8fb84555-bswmw"] Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.468527 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-b4xzd" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.549289 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n5c7s\" (UniqueName: \"kubernetes.io/projected/a6abecb0-a854-4dbb-9fb0-5ba03e64daae-kube-api-access-n5c7s\") pod \"nmstate-webhook-f8fb84555-bswmw\" (UID: \"a6abecb0-a854-4dbb-9fb0-5ba03e64daae\") " pod="openshift-nmstate/nmstate-webhook-f8fb84555-bswmw" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.549341 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/a6abecb0-a854-4dbb-9fb0-5ba03e64daae-tls-key-pair\") pod \"nmstate-webhook-f8fb84555-bswmw\" (UID: \"a6abecb0-a854-4dbb-9fb0-5ba03e64daae\") " pod="openshift-nmstate/nmstate-webhook-f8fb84555-bswmw" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.549805 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xdj2w\" (UniqueName: \"kubernetes.io/projected/4fe5b93a-b962-4feb-85cf-1210c428f7f6-kube-api-access-xdj2w\") pod \"nmstate-metrics-7f7f7578db-wvmhg\" (UID: \"4fe5b93a-b962-4feb-85cf-1210c428f7f6\") " pod="openshift-nmstate/nmstate-metrics-7f7f7578db-wvmhg" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.557482 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-6ff7998486-ppsbf"] Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.558334 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-ppsbf" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.560497 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.560572 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.560756 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-9cp2s" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.576176 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-6ff7998486-ppsbf"] Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.650832 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/85ef8f1d-8549-4a84-a38c-aefa9e4e5583-plugin-serving-cert\") pod \"nmstate-console-plugin-6ff7998486-ppsbf\" (UID: \"85ef8f1d-8549-4a84-a38c-aefa9e4e5583\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-ppsbf" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.650872 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/d4103fb8-a0f4-4b91-b08a-6047a1f4df6e-ovs-socket\") pod \"nmstate-handler-b4xzd\" (UID: \"d4103fb8-a0f4-4b91-b08a-6047a1f4df6e\") " pod="openshift-nmstate/nmstate-handler-b4xzd" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.650904 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xdj2w\" (UniqueName: \"kubernetes.io/projected/4fe5b93a-b962-4feb-85cf-1210c428f7f6-kube-api-access-xdj2w\") pod \"nmstate-metrics-7f7f7578db-wvmhg\" (UID: \"4fe5b93a-b962-4feb-85cf-1210c428f7f6\") " pod="openshift-nmstate/nmstate-metrics-7f7f7578db-wvmhg" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.650927 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/d4103fb8-a0f4-4b91-b08a-6047a1f4df6e-nmstate-lock\") pod \"nmstate-handler-b4xzd\" (UID: \"d4103fb8-a0f4-4b91-b08a-6047a1f4df6e\") " pod="openshift-nmstate/nmstate-handler-b4xzd" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.650949 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/85ef8f1d-8549-4a84-a38c-aefa9e4e5583-nginx-conf\") pod \"nmstate-console-plugin-6ff7998486-ppsbf\" (UID: \"85ef8f1d-8549-4a84-a38c-aefa9e4e5583\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-ppsbf" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.650966 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9v5p5\" (UniqueName: \"kubernetes.io/projected/d4103fb8-a0f4-4b91-b08a-6047a1f4df6e-kube-api-access-9v5p5\") pod \"nmstate-handler-b4xzd\" (UID: \"d4103fb8-a0f4-4b91-b08a-6047a1f4df6e\") " pod="openshift-nmstate/nmstate-handler-b4xzd" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.651193 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: 
\"kubernetes.io/host-path/d4103fb8-a0f4-4b91-b08a-6047a1f4df6e-dbus-socket\") pod \"nmstate-handler-b4xzd\" (UID: \"d4103fb8-a0f4-4b91-b08a-6047a1f4df6e\") " pod="openshift-nmstate/nmstate-handler-b4xzd" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.651269 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n5c7s\" (UniqueName: \"kubernetes.io/projected/a6abecb0-a854-4dbb-9fb0-5ba03e64daae-kube-api-access-n5c7s\") pod \"nmstate-webhook-f8fb84555-bswmw\" (UID: \"a6abecb0-a854-4dbb-9fb0-5ba03e64daae\") " pod="openshift-nmstate/nmstate-webhook-f8fb84555-bswmw" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.651304 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/a6abecb0-a854-4dbb-9fb0-5ba03e64daae-tls-key-pair\") pod \"nmstate-webhook-f8fb84555-bswmw\" (UID: \"a6abecb0-a854-4dbb-9fb0-5ba03e64daae\") " pod="openshift-nmstate/nmstate-webhook-f8fb84555-bswmw" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.651329 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9nbgc\" (UniqueName: \"kubernetes.io/projected/85ef8f1d-8549-4a84-a38c-aefa9e4e5583-kube-api-access-9nbgc\") pod \"nmstate-console-plugin-6ff7998486-ppsbf\" (UID: \"85ef8f1d-8549-4a84-a38c-aefa9e4e5583\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-ppsbf" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.656663 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/a6abecb0-a854-4dbb-9fb0-5ba03e64daae-tls-key-pair\") pod \"nmstate-webhook-f8fb84555-bswmw\" (UID: \"a6abecb0-a854-4dbb-9fb0-5ba03e64daae\") " pod="openshift-nmstate/nmstate-webhook-f8fb84555-bswmw" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.675175 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xdj2w\" (UniqueName: \"kubernetes.io/projected/4fe5b93a-b962-4feb-85cf-1210c428f7f6-kube-api-access-xdj2w\") pod \"nmstate-metrics-7f7f7578db-wvmhg\" (UID: \"4fe5b93a-b962-4feb-85cf-1210c428f7f6\") " pod="openshift-nmstate/nmstate-metrics-7f7f7578db-wvmhg" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.678026 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f7f7578db-wvmhg" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.687564 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n5c7s\" (UniqueName: \"kubernetes.io/projected/a6abecb0-a854-4dbb-9fb0-5ba03e64daae-kube-api-access-n5c7s\") pod \"nmstate-webhook-f8fb84555-bswmw\" (UID: \"a6abecb0-a854-4dbb-9fb0-5ba03e64daae\") " pod="openshift-nmstate/nmstate-webhook-f8fb84555-bswmw" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.688056 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-f8fb84555-bswmw" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.751913 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/d4103fb8-a0f4-4b91-b08a-6047a1f4df6e-nmstate-lock\") pod \"nmstate-handler-b4xzd\" (UID: \"d4103fb8-a0f4-4b91-b08a-6047a1f4df6e\") " pod="openshift-nmstate/nmstate-handler-b4xzd" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.752273 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/85ef8f1d-8549-4a84-a38c-aefa9e4e5583-nginx-conf\") pod \"nmstate-console-plugin-6ff7998486-ppsbf\" (UID: \"85ef8f1d-8549-4a84-a38c-aefa9e4e5583\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-ppsbf" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.752331 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9v5p5\" (UniqueName: \"kubernetes.io/projected/d4103fb8-a0f4-4b91-b08a-6047a1f4df6e-kube-api-access-9v5p5\") pod \"nmstate-handler-b4xzd\" (UID: \"d4103fb8-a0f4-4b91-b08a-6047a1f4df6e\") " pod="openshift-nmstate/nmstate-handler-b4xzd" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.752381 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/d4103fb8-a0f4-4b91-b08a-6047a1f4df6e-dbus-socket\") pod \"nmstate-handler-b4xzd\" (UID: \"d4103fb8-a0f4-4b91-b08a-6047a1f4df6e\") " pod="openshift-nmstate/nmstate-handler-b4xzd" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.752416 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9nbgc\" (UniqueName: \"kubernetes.io/projected/85ef8f1d-8549-4a84-a38c-aefa9e4e5583-kube-api-access-9nbgc\") pod \"nmstate-console-plugin-6ff7998486-ppsbf\" (UID: \"85ef8f1d-8549-4a84-a38c-aefa9e4e5583\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-ppsbf" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.752470 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/85ef8f1d-8549-4a84-a38c-aefa9e4e5583-plugin-serving-cert\") pod \"nmstate-console-plugin-6ff7998486-ppsbf\" (UID: \"85ef8f1d-8549-4a84-a38c-aefa9e4e5583\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-ppsbf" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.752495 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/d4103fb8-a0f4-4b91-b08a-6047a1f4df6e-ovs-socket\") pod \"nmstate-handler-b4xzd\" (UID: \"d4103fb8-a0f4-4b91-b08a-6047a1f4df6e\") " pod="openshift-nmstate/nmstate-handler-b4xzd" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.752566 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/d4103fb8-a0f4-4b91-b08a-6047a1f4df6e-ovs-socket\") pod \"nmstate-handler-b4xzd\" (UID: \"d4103fb8-a0f4-4b91-b08a-6047a1f4df6e\") " pod="openshift-nmstate/nmstate-handler-b4xzd" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.751985 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/d4103fb8-a0f4-4b91-b08a-6047a1f4df6e-nmstate-lock\") pod \"nmstate-handler-b4xzd\" (UID: 
\"d4103fb8-a0f4-4b91-b08a-6047a1f4df6e\") " pod="openshift-nmstate/nmstate-handler-b4xzd" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.753613 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/85ef8f1d-8549-4a84-a38c-aefa9e4e5583-nginx-conf\") pod \"nmstate-console-plugin-6ff7998486-ppsbf\" (UID: \"85ef8f1d-8549-4a84-a38c-aefa9e4e5583\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-ppsbf" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.753658 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/d4103fb8-a0f4-4b91-b08a-6047a1f4df6e-dbus-socket\") pod \"nmstate-handler-b4xzd\" (UID: \"d4103fb8-a0f4-4b91-b08a-6047a1f4df6e\") " pod="openshift-nmstate/nmstate-handler-b4xzd" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.760884 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-58f88f67c6-vhmqs"] Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.761007 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/85ef8f1d-8549-4a84-a38c-aefa9e4e5583-plugin-serving-cert\") pod \"nmstate-console-plugin-6ff7998486-ppsbf\" (UID: \"85ef8f1d-8549-4a84-a38c-aefa9e4e5583\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-ppsbf" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.763798 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-58f88f67c6-vhmqs" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.774490 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9v5p5\" (UniqueName: \"kubernetes.io/projected/d4103fb8-a0f4-4b91-b08a-6047a1f4df6e-kube-api-access-9v5p5\") pod \"nmstate-handler-b4xzd\" (UID: \"d4103fb8-a0f4-4b91-b08a-6047a1f4df6e\") " pod="openshift-nmstate/nmstate-handler-b4xzd" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.782584 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-58f88f67c6-vhmqs"] Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.785954 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9nbgc\" (UniqueName: \"kubernetes.io/projected/85ef8f1d-8549-4a84-a38c-aefa9e4e5583-kube-api-access-9nbgc\") pod \"nmstate-console-plugin-6ff7998486-ppsbf\" (UID: \"85ef8f1d-8549-4a84-a38c-aefa9e4e5583\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-ppsbf" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.786345 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-b4xzd" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.887092 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/15a3014f-c192-42d0-a6bd-50114640df70-console-oauth-config\") pod \"console-58f88f67c6-vhmqs\" (UID: \"15a3014f-c192-42d0-a6bd-50114640df70\") " pod="openshift-console/console-58f88f67c6-vhmqs" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.887132 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/15a3014f-c192-42d0-a6bd-50114640df70-console-serving-cert\") pod \"console-58f88f67c6-vhmqs\" (UID: \"15a3014f-c192-42d0-a6bd-50114640df70\") " pod="openshift-console/console-58f88f67c6-vhmqs" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.887154 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/15a3014f-c192-42d0-a6bd-50114640df70-service-ca\") pod \"console-58f88f67c6-vhmqs\" (UID: \"15a3014f-c192-42d0-a6bd-50114640df70\") " pod="openshift-console/console-58f88f67c6-vhmqs" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.887175 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xg8bj\" (UniqueName: \"kubernetes.io/projected/15a3014f-c192-42d0-a6bd-50114640df70-kube-api-access-xg8bj\") pod \"console-58f88f67c6-vhmqs\" (UID: \"15a3014f-c192-42d0-a6bd-50114640df70\") " pod="openshift-console/console-58f88f67c6-vhmqs" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.887205 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/15a3014f-c192-42d0-a6bd-50114640df70-console-config\") pod \"console-58f88f67c6-vhmqs\" (UID: \"15a3014f-c192-42d0-a6bd-50114640df70\") " pod="openshift-console/console-58f88f67c6-vhmqs" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.887228 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/15a3014f-c192-42d0-a6bd-50114640df70-trusted-ca-bundle\") pod \"console-58f88f67c6-vhmqs\" (UID: \"15a3014f-c192-42d0-a6bd-50114640df70\") " pod="openshift-console/console-58f88f67c6-vhmqs" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.887255 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/15a3014f-c192-42d0-a6bd-50114640df70-oauth-serving-cert\") pod \"console-58f88f67c6-vhmqs\" (UID: \"15a3014f-c192-42d0-a6bd-50114640df70\") " pod="openshift-console/console-58f88f67c6-vhmqs" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.887471 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-ppsbf" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.991868 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/15a3014f-c192-42d0-a6bd-50114640df70-console-config\") pod \"console-58f88f67c6-vhmqs\" (UID: \"15a3014f-c192-42d0-a6bd-50114640df70\") " pod="openshift-console/console-58f88f67c6-vhmqs" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.992232 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/15a3014f-c192-42d0-a6bd-50114640df70-trusted-ca-bundle\") pod \"console-58f88f67c6-vhmqs\" (UID: \"15a3014f-c192-42d0-a6bd-50114640df70\") " pod="openshift-console/console-58f88f67c6-vhmqs" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.992266 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/15a3014f-c192-42d0-a6bd-50114640df70-oauth-serving-cert\") pod \"console-58f88f67c6-vhmqs\" (UID: \"15a3014f-c192-42d0-a6bd-50114640df70\") " pod="openshift-console/console-58f88f67c6-vhmqs" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.992303 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/15a3014f-c192-42d0-a6bd-50114640df70-console-oauth-config\") pod \"console-58f88f67c6-vhmqs\" (UID: \"15a3014f-c192-42d0-a6bd-50114640df70\") " pod="openshift-console/console-58f88f67c6-vhmqs" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.992326 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/15a3014f-c192-42d0-a6bd-50114640df70-console-serving-cert\") pod \"console-58f88f67c6-vhmqs\" (UID: \"15a3014f-c192-42d0-a6bd-50114640df70\") " pod="openshift-console/console-58f88f67c6-vhmqs" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.992347 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/15a3014f-c192-42d0-a6bd-50114640df70-service-ca\") pod \"console-58f88f67c6-vhmqs\" (UID: \"15a3014f-c192-42d0-a6bd-50114640df70\") " pod="openshift-console/console-58f88f67c6-vhmqs" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.992370 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xg8bj\" (UniqueName: \"kubernetes.io/projected/15a3014f-c192-42d0-a6bd-50114640df70-kube-api-access-xg8bj\") pod \"console-58f88f67c6-vhmqs\" (UID: \"15a3014f-c192-42d0-a6bd-50114640df70\") " pod="openshift-console/console-58f88f67c6-vhmqs" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.993263 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/15a3014f-c192-42d0-a6bd-50114640df70-console-config\") pod \"console-58f88f67c6-vhmqs\" (UID: \"15a3014f-c192-42d0-a6bd-50114640df70\") " pod="openshift-console/console-58f88f67c6-vhmqs" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.993281 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/15a3014f-c192-42d0-a6bd-50114640df70-service-ca\") pod \"console-58f88f67c6-vhmqs\" (UID: \"15a3014f-c192-42d0-a6bd-50114640df70\") " 
pod="openshift-console/console-58f88f67c6-vhmqs" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.993457 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/15a3014f-c192-42d0-a6bd-50114640df70-oauth-serving-cert\") pod \"console-58f88f67c6-vhmqs\" (UID: \"15a3014f-c192-42d0-a6bd-50114640df70\") " pod="openshift-console/console-58f88f67c6-vhmqs" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.993539 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/15a3014f-c192-42d0-a6bd-50114640df70-trusted-ca-bundle\") pod \"console-58f88f67c6-vhmqs\" (UID: \"15a3014f-c192-42d0-a6bd-50114640df70\") " pod="openshift-console/console-58f88f67c6-vhmqs" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.999123 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/15a3014f-c192-42d0-a6bd-50114640df70-console-serving-cert\") pod \"console-58f88f67c6-vhmqs\" (UID: \"15a3014f-c192-42d0-a6bd-50114640df70\") " pod="openshift-console/console-58f88f67c6-vhmqs" Dec 13 06:42:54 crc kubenswrapper[5048]: I1213 06:42:54.999590 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/15a3014f-c192-42d0-a6bd-50114640df70-console-oauth-config\") pod \"console-58f88f67c6-vhmqs\" (UID: \"15a3014f-c192-42d0-a6bd-50114640df70\") " pod="openshift-console/console-58f88f67c6-vhmqs" Dec 13 06:42:55 crc kubenswrapper[5048]: I1213 06:42:55.010310 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xg8bj\" (UniqueName: \"kubernetes.io/projected/15a3014f-c192-42d0-a6bd-50114640df70-kube-api-access-xg8bj\") pod \"console-58f88f67c6-vhmqs\" (UID: \"15a3014f-c192-42d0-a6bd-50114640df70\") " pod="openshift-console/console-58f88f67c6-vhmqs" Dec 13 06:42:55 crc kubenswrapper[5048]: I1213 06:42:55.022392 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-f8fb84555-bswmw"] Dec 13 06:42:55 crc kubenswrapper[5048]: I1213 06:42:55.105685 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-58f88f67c6-vhmqs" Dec 13 06:42:55 crc kubenswrapper[5048]: I1213 06:42:55.166896 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-6ff7998486-ppsbf"] Dec 13 06:42:55 crc kubenswrapper[5048]: W1213 06:42:55.174135 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod85ef8f1d_8549_4a84_a38c_aefa9e4e5583.slice/crio-6a1f0578f69431399f45439ec521009fa4a8673c46c3efc7d2e191e485c825bf WatchSource:0}: Error finding container 6a1f0578f69431399f45439ec521009fa4a8673c46c3efc7d2e191e485c825bf: Status 404 returned error can't find the container with id 6a1f0578f69431399f45439ec521009fa4a8673c46c3efc7d2e191e485c825bf Dec 13 06:42:55 crc kubenswrapper[5048]: I1213 06:42:55.286789 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-b4xzd" event={"ID":"d4103fb8-a0f4-4b91-b08a-6047a1f4df6e","Type":"ContainerStarted","Data":"652cb87647e77d968d2955dd34affd086dec6b834e2f24f818763a834e1c0cde"} Dec 13 06:42:55 crc kubenswrapper[5048]: I1213 06:42:55.288406 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-ppsbf" event={"ID":"85ef8f1d-8549-4a84-a38c-aefa9e4e5583","Type":"ContainerStarted","Data":"6a1f0578f69431399f45439ec521009fa4a8673c46c3efc7d2e191e485c825bf"} Dec 13 06:42:55 crc kubenswrapper[5048]: I1213 06:42:55.290002 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-f8fb84555-bswmw" event={"ID":"a6abecb0-a854-4dbb-9fb0-5ba03e64daae","Type":"ContainerStarted","Data":"c6059d04cdab75091a2125d4b878eff08d33a5c01c3cec19c5958fedab738367"} Dec 13 06:42:55 crc kubenswrapper[5048]: I1213 06:42:55.305550 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f7f7578db-wvmhg"] Dec 13 06:42:55 crc kubenswrapper[5048]: I1213 06:42:55.346899 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-58f88f67c6-vhmqs"] Dec 13 06:42:55 crc kubenswrapper[5048]: W1213 06:42:55.351815 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod15a3014f_c192_42d0_a6bd_50114640df70.slice/crio-f791cc9e8972a063db7b26c06775f7477123b483139bec458be2c8c889c10cb0 WatchSource:0}: Error finding container f791cc9e8972a063db7b26c06775f7477123b483139bec458be2c8c889c10cb0: Status 404 returned error can't find the container with id f791cc9e8972a063db7b26c06775f7477123b483139bec458be2c8c889c10cb0 Dec 13 06:42:56 crc kubenswrapper[5048]: I1213 06:42:56.296949 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f7f7578db-wvmhg" event={"ID":"4fe5b93a-b962-4feb-85cf-1210c428f7f6","Type":"ContainerStarted","Data":"fa478ec7177eff95854e4f4836764a4b1aa8ac582f3d63b9704fa59afb31a8a8"} Dec 13 06:42:56 crc kubenswrapper[5048]: I1213 06:42:56.299271 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-58f88f67c6-vhmqs" event={"ID":"15a3014f-c192-42d0-a6bd-50114640df70","Type":"ContainerStarted","Data":"9214c8141480ac419d8640a3dbd33fa4afc5185ee4f02760cd45b21c7a1c9214"} Dec 13 06:42:56 crc kubenswrapper[5048]: I1213 06:42:56.299300 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-58f88f67c6-vhmqs" 
event={"ID":"15a3014f-c192-42d0-a6bd-50114640df70","Type":"ContainerStarted","Data":"f791cc9e8972a063db7b26c06775f7477123b483139bec458be2c8c889c10cb0"} Dec 13 06:42:56 crc kubenswrapper[5048]: I1213 06:42:56.318778 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-58f88f67c6-vhmqs" podStartSLOduration=2.318762537 podStartE2EDuration="2.318762537s" podCreationTimestamp="2025-12-13 06:42:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:42:56.317519245 +0000 UTC m=+810.184113846" watchObservedRunningTime="2025-12-13 06:42:56.318762537 +0000 UTC m=+810.185357118" Dec 13 06:42:56 crc kubenswrapper[5048]: I1213 06:42:56.737170 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-nj8jx" Dec 13 06:42:56 crc kubenswrapper[5048]: I1213 06:42:56.798038 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-nj8jx" Dec 13 06:42:56 crc kubenswrapper[5048]: I1213 06:42:56.969629 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nj8jx"] Dec 13 06:42:58 crc kubenswrapper[5048]: I1213 06:42:58.355666 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-nj8jx" podUID="e8549450-c793-4fca-9f52-d7650d704a31" containerName="registry-server" containerID="cri-o://6713eee8dbf281cd953ac042918461fccfcb0596d899c92f08502bdc9b609084" gracePeriod=2 Dec 13 06:42:59 crc kubenswrapper[5048]: I1213 06:42:59.263406 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nj8jx" Dec 13 06:42:59 crc kubenswrapper[5048]: I1213 06:42:59.369019 5048 generic.go:334] "Generic (PLEG): container finished" podID="e8549450-c793-4fca-9f52-d7650d704a31" containerID="6713eee8dbf281cd953ac042918461fccfcb0596d899c92f08502bdc9b609084" exitCode=0 Dec 13 06:42:59 crc kubenswrapper[5048]: I1213 06:42:59.369058 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nj8jx" event={"ID":"e8549450-c793-4fca-9f52-d7650d704a31","Type":"ContainerDied","Data":"6713eee8dbf281cd953ac042918461fccfcb0596d899c92f08502bdc9b609084"} Dec 13 06:42:59 crc kubenswrapper[5048]: I1213 06:42:59.369081 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nj8jx" event={"ID":"e8549450-c793-4fca-9f52-d7650d704a31","Type":"ContainerDied","Data":"d7e5ab823fec0624c2eefa9ed8ab331bfb0c50f2ccd9b294b4cb1ba2324ace62"} Dec 13 06:42:59 crc kubenswrapper[5048]: I1213 06:42:59.369097 5048 scope.go:117] "RemoveContainer" containerID="6713eee8dbf281cd953ac042918461fccfcb0596d899c92f08502bdc9b609084" Dec 13 06:42:59 crc kubenswrapper[5048]: I1213 06:42:59.369186 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-nj8jx" Dec 13 06:42:59 crc kubenswrapper[5048]: I1213 06:42:59.388945 5048 scope.go:117] "RemoveContainer" containerID="c21c4c20d67dc369508f48f431af9b8880f8736981aa72da096cc5ab3b002ae3" Dec 13 06:42:59 crc kubenswrapper[5048]: I1213 06:42:59.410381 5048 scope.go:117] "RemoveContainer" containerID="9d8258c06505b2b3a13a526d3586aefc3e3f980860d920d689059e8e819c8d28" Dec 13 06:42:59 crc kubenswrapper[5048]: I1213 06:42:59.418340 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e8549450-c793-4fca-9f52-d7650d704a31-catalog-content\") pod \"e8549450-c793-4fca-9f52-d7650d704a31\" (UID: \"e8549450-c793-4fca-9f52-d7650d704a31\") " Dec 13 06:42:59 crc kubenswrapper[5048]: I1213 06:42:59.418402 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e8549450-c793-4fca-9f52-d7650d704a31-utilities\") pod \"e8549450-c793-4fca-9f52-d7650d704a31\" (UID: \"e8549450-c793-4fca-9f52-d7650d704a31\") " Dec 13 06:42:59 crc kubenswrapper[5048]: I1213 06:42:59.418518 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vw6pj\" (UniqueName: \"kubernetes.io/projected/e8549450-c793-4fca-9f52-d7650d704a31-kube-api-access-vw6pj\") pod \"e8549450-c793-4fca-9f52-d7650d704a31\" (UID: \"e8549450-c793-4fca-9f52-d7650d704a31\") " Dec 13 06:42:59 crc kubenswrapper[5048]: I1213 06:42:59.419728 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e8549450-c793-4fca-9f52-d7650d704a31-utilities" (OuterVolumeSpecName: "utilities") pod "e8549450-c793-4fca-9f52-d7650d704a31" (UID: "e8549450-c793-4fca-9f52-d7650d704a31"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:42:59 crc kubenswrapper[5048]: I1213 06:42:59.425705 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8549450-c793-4fca-9f52-d7650d704a31-kube-api-access-vw6pj" (OuterVolumeSpecName: "kube-api-access-vw6pj") pod "e8549450-c793-4fca-9f52-d7650d704a31" (UID: "e8549450-c793-4fca-9f52-d7650d704a31"). InnerVolumeSpecName "kube-api-access-vw6pj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:42:59 crc kubenswrapper[5048]: I1213 06:42:59.435965 5048 scope.go:117] "RemoveContainer" containerID="6713eee8dbf281cd953ac042918461fccfcb0596d899c92f08502bdc9b609084" Dec 13 06:42:59 crc kubenswrapper[5048]: E1213 06:42:59.436503 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6713eee8dbf281cd953ac042918461fccfcb0596d899c92f08502bdc9b609084\": container with ID starting with 6713eee8dbf281cd953ac042918461fccfcb0596d899c92f08502bdc9b609084 not found: ID does not exist" containerID="6713eee8dbf281cd953ac042918461fccfcb0596d899c92f08502bdc9b609084" Dec 13 06:42:59 crc kubenswrapper[5048]: I1213 06:42:59.436548 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6713eee8dbf281cd953ac042918461fccfcb0596d899c92f08502bdc9b609084"} err="failed to get container status \"6713eee8dbf281cd953ac042918461fccfcb0596d899c92f08502bdc9b609084\": rpc error: code = NotFound desc = could not find container \"6713eee8dbf281cd953ac042918461fccfcb0596d899c92f08502bdc9b609084\": container with ID starting with 6713eee8dbf281cd953ac042918461fccfcb0596d899c92f08502bdc9b609084 not found: ID does not exist" Dec 13 06:42:59 crc kubenswrapper[5048]: I1213 06:42:59.436570 5048 scope.go:117] "RemoveContainer" containerID="c21c4c20d67dc369508f48f431af9b8880f8736981aa72da096cc5ab3b002ae3" Dec 13 06:42:59 crc kubenswrapper[5048]: E1213 06:42:59.436896 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c21c4c20d67dc369508f48f431af9b8880f8736981aa72da096cc5ab3b002ae3\": container with ID starting with c21c4c20d67dc369508f48f431af9b8880f8736981aa72da096cc5ab3b002ae3 not found: ID does not exist" containerID="c21c4c20d67dc369508f48f431af9b8880f8736981aa72da096cc5ab3b002ae3" Dec 13 06:42:59 crc kubenswrapper[5048]: I1213 06:42:59.436927 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c21c4c20d67dc369508f48f431af9b8880f8736981aa72da096cc5ab3b002ae3"} err="failed to get container status \"c21c4c20d67dc369508f48f431af9b8880f8736981aa72da096cc5ab3b002ae3\": rpc error: code = NotFound desc = could not find container \"c21c4c20d67dc369508f48f431af9b8880f8736981aa72da096cc5ab3b002ae3\": container with ID starting with c21c4c20d67dc369508f48f431af9b8880f8736981aa72da096cc5ab3b002ae3 not found: ID does not exist" Dec 13 06:42:59 crc kubenswrapper[5048]: I1213 06:42:59.436945 5048 scope.go:117] "RemoveContainer" containerID="9d8258c06505b2b3a13a526d3586aefc3e3f980860d920d689059e8e819c8d28" Dec 13 06:42:59 crc kubenswrapper[5048]: E1213 06:42:59.437344 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d8258c06505b2b3a13a526d3586aefc3e3f980860d920d689059e8e819c8d28\": container with ID starting with 9d8258c06505b2b3a13a526d3586aefc3e3f980860d920d689059e8e819c8d28 not found: ID does not exist" containerID="9d8258c06505b2b3a13a526d3586aefc3e3f980860d920d689059e8e819c8d28" Dec 13 06:42:59 crc kubenswrapper[5048]: I1213 06:42:59.437392 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d8258c06505b2b3a13a526d3586aefc3e3f980860d920d689059e8e819c8d28"} err="failed to get container status \"9d8258c06505b2b3a13a526d3586aefc3e3f980860d920d689059e8e819c8d28\": rpc error: code = NotFound desc = could not 
find container \"9d8258c06505b2b3a13a526d3586aefc3e3f980860d920d689059e8e819c8d28\": container with ID starting with 9d8258c06505b2b3a13a526d3586aefc3e3f980860d920d689059e8e819c8d28 not found: ID does not exist" Dec 13 06:42:59 crc kubenswrapper[5048]: I1213 06:42:59.520307 5048 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e8549450-c793-4fca-9f52-d7650d704a31-utilities\") on node \"crc\" DevicePath \"\"" Dec 13 06:42:59 crc kubenswrapper[5048]: I1213 06:42:59.520737 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vw6pj\" (UniqueName: \"kubernetes.io/projected/e8549450-c793-4fca-9f52-d7650d704a31-kube-api-access-vw6pj\") on node \"crc\" DevicePath \"\"" Dec 13 06:42:59 crc kubenswrapper[5048]: I1213 06:42:59.555159 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e8549450-c793-4fca-9f52-d7650d704a31-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e8549450-c793-4fca-9f52-d7650d704a31" (UID: "e8549450-c793-4fca-9f52-d7650d704a31"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:42:59 crc kubenswrapper[5048]: I1213 06:42:59.622509 5048 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e8549450-c793-4fca-9f52-d7650d704a31-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 13 06:42:59 crc kubenswrapper[5048]: I1213 06:42:59.697543 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nj8jx"] Dec 13 06:42:59 crc kubenswrapper[5048]: I1213 06:42:59.709211 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-nj8jx"] Dec 13 06:43:00 crc kubenswrapper[5048]: I1213 06:43:00.378982 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-ppsbf" event={"ID":"85ef8f1d-8549-4a84-a38c-aefa9e4e5583","Type":"ContainerStarted","Data":"199d5c5683ffeae0ef533481d79e1b5754b4edb03fc2baf4c7c5ea69b4e7f260"} Dec 13 06:43:00 crc kubenswrapper[5048]: I1213 06:43:00.383032 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-f8fb84555-bswmw" event={"ID":"a6abecb0-a854-4dbb-9fb0-5ba03e64daae","Type":"ContainerStarted","Data":"b576c9914c4a5d36e2dff6cd86e76f31b78a7deaecdbe62065c1bc9d4de2c627"} Dec 13 06:43:00 crc kubenswrapper[5048]: I1213 06:43:00.383200 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-f8fb84555-bswmw" Dec 13 06:43:00 crc kubenswrapper[5048]: I1213 06:43:00.385332 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f7f7578db-wvmhg" event={"ID":"4fe5b93a-b962-4feb-85cf-1210c428f7f6","Type":"ContainerStarted","Data":"36cfe05fb0b259896084f2d5c6ce34e249181b3ffd7e698bde08364bdb19d029"} Dec 13 06:43:00 crc kubenswrapper[5048]: I1213 06:43:00.386409 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-b4xzd" event={"ID":"d4103fb8-a0f4-4b91-b08a-6047a1f4df6e","Type":"ContainerStarted","Data":"58aa438b315d1d80fa137295e221f8837cfe05ea7b60eca8d10cf1015380bdb8"} Dec 13 06:43:00 crc kubenswrapper[5048]: I1213 06:43:00.386835 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-b4xzd" Dec 13 06:43:00 crc kubenswrapper[5048]: I1213 06:43:00.398422 5048 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-ppsbf" podStartSLOduration=2.58906486 podStartE2EDuration="6.398405035s" podCreationTimestamp="2025-12-13 06:42:54 +0000 UTC" firstStartedPulling="2025-12-13 06:42:55.17701695 +0000 UTC m=+809.043611541" lastFinishedPulling="2025-12-13 06:42:58.986357135 +0000 UTC m=+812.852951716" observedRunningTime="2025-12-13 06:43:00.391991518 +0000 UTC m=+814.258586099" watchObservedRunningTime="2025-12-13 06:43:00.398405035 +0000 UTC m=+814.264999616" Dec 13 06:43:00 crc kubenswrapper[5048]: I1213 06:43:00.407938 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-b4xzd" podStartSLOduration=2.216574551 podStartE2EDuration="6.407917771s" podCreationTimestamp="2025-12-13 06:42:54 +0000 UTC" firstStartedPulling="2025-12-13 06:42:54.821323706 +0000 UTC m=+808.687918287" lastFinishedPulling="2025-12-13 06:42:59.012666906 +0000 UTC m=+812.879261507" observedRunningTime="2025-12-13 06:43:00.407586623 +0000 UTC m=+814.274181204" watchObservedRunningTime="2025-12-13 06:43:00.407917771 +0000 UTC m=+814.274512352" Dec 13 06:43:00 crc kubenswrapper[5048]: I1213 06:43:00.574648 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8549450-c793-4fca-9f52-d7650d704a31" path="/var/lib/kubelet/pods/e8549450-c793-4fca-9f52-d7650d704a31/volumes" Dec 13 06:43:03 crc kubenswrapper[5048]: I1213 06:43:03.404635 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f7f7578db-wvmhg" event={"ID":"4fe5b93a-b962-4feb-85cf-1210c428f7f6","Type":"ContainerStarted","Data":"207a477ddc167fc1aaf36332bd97c3917d7d6ea8120ac032472de623a2763d1e"} Dec 13 06:43:03 crc kubenswrapper[5048]: I1213 06:43:03.424279 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-7f7f7578db-wvmhg" podStartSLOduration=2.376951156 podStartE2EDuration="9.424255561s" podCreationTimestamp="2025-12-13 06:42:54 +0000 UTC" firstStartedPulling="2025-12-13 06:42:55.311247778 +0000 UTC m=+809.177842359" lastFinishedPulling="2025-12-13 06:43:02.358552183 +0000 UTC m=+816.225146764" observedRunningTime="2025-12-13 06:43:03.420466032 +0000 UTC m=+817.287060613" watchObservedRunningTime="2025-12-13 06:43:03.424255561 +0000 UTC m=+817.290850152" Dec 13 06:43:03 crc kubenswrapper[5048]: I1213 06:43:03.424958 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-f8fb84555-bswmw" podStartSLOduration=5.472525248 podStartE2EDuration="9.424950579s" podCreationTimestamp="2025-12-13 06:42:54 +0000 UTC" firstStartedPulling="2025-12-13 06:42:55.034997651 +0000 UTC m=+808.901592232" lastFinishedPulling="2025-12-13 06:42:58.987422982 +0000 UTC m=+812.854017563" observedRunningTime="2025-12-13 06:43:00.427374076 +0000 UTC m=+814.293968647" watchObservedRunningTime="2025-12-13 06:43:03.424950579 +0000 UTC m=+817.291545160" Dec 13 06:43:04 crc kubenswrapper[5048]: I1213 06:43:04.816850 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-b4xzd" Dec 13 06:43:05 crc kubenswrapper[5048]: I1213 06:43:05.106563 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-58f88f67c6-vhmqs" Dec 13 06:43:05 crc kubenswrapper[5048]: I1213 06:43:05.106634 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-console/console-58f88f67c6-vhmqs" Dec 13 06:43:05 crc kubenswrapper[5048]: I1213 06:43:05.111985 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-58f88f67c6-vhmqs" Dec 13 06:43:05 crc kubenswrapper[5048]: I1213 06:43:05.417984 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-58f88f67c6-vhmqs" Dec 13 06:43:05 crc kubenswrapper[5048]: I1213 06:43:05.466982 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-5nt4n"] Dec 13 06:43:14 crc kubenswrapper[5048]: I1213 06:43:14.694065 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-f8fb84555-bswmw" Dec 13 06:43:16 crc kubenswrapper[5048]: I1213 06:43:16.215651 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 13 06:43:16 crc kubenswrapper[5048]: I1213 06:43:16.215906 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 13 06:43:16 crc kubenswrapper[5048]: I1213 06:43:16.215956 5048 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" Dec 13 06:43:16 crc kubenswrapper[5048]: I1213 06:43:16.216564 5048 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"04bb8ca387b7d0e469a66283e3b83e0b6d5378cbf2f1611cc5c1df9ba125a043"} pod="openshift-machine-config-operator/machine-config-daemon-j7hns" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 13 06:43:16 crc kubenswrapper[5048]: I1213 06:43:16.216627 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" containerID="cri-o://04bb8ca387b7d0e469a66283e3b83e0b6d5378cbf2f1611cc5c1df9ba125a043" gracePeriod=600 Dec 13 06:43:16 crc kubenswrapper[5048]: I1213 06:43:16.476616 5048 generic.go:334] "Generic (PLEG): container finished" podID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerID="04bb8ca387b7d0e469a66283e3b83e0b6d5378cbf2f1611cc5c1df9ba125a043" exitCode=0 Dec 13 06:43:16 crc kubenswrapper[5048]: I1213 06:43:16.476689 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" event={"ID":"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b","Type":"ContainerDied","Data":"04bb8ca387b7d0e469a66283e3b83e0b6d5378cbf2f1611cc5c1df9ba125a043"} Dec 13 06:43:16 crc kubenswrapper[5048]: I1213 06:43:16.477023 5048 scope.go:117] "RemoveContainer" containerID="43141bab8f04179c1cc49c5c276db3116f7a7de3d0b4075641053d18b79b3930" Dec 13 06:43:17 crc kubenswrapper[5048]: I1213 06:43:17.486150 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" 
event={"ID":"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b","Type":"ContainerStarted","Data":"b78100e7e0c4b665e6330e2c03f1531c2cd133387b8c2c53260a2c5bf79c77e3"} Dec 13 06:43:27 crc kubenswrapper[5048]: I1213 06:43:27.217586 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6"] Dec 13 06:43:27 crc kubenswrapper[5048]: E1213 06:43:27.218410 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8549450-c793-4fca-9f52-d7650d704a31" containerName="registry-server" Dec 13 06:43:27 crc kubenswrapper[5048]: I1213 06:43:27.218426 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8549450-c793-4fca-9f52-d7650d704a31" containerName="registry-server" Dec 13 06:43:27 crc kubenswrapper[5048]: E1213 06:43:27.218468 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8549450-c793-4fca-9f52-d7650d704a31" containerName="extract-content" Dec 13 06:43:27 crc kubenswrapper[5048]: I1213 06:43:27.218477 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8549450-c793-4fca-9f52-d7650d704a31" containerName="extract-content" Dec 13 06:43:27 crc kubenswrapper[5048]: E1213 06:43:27.218500 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8549450-c793-4fca-9f52-d7650d704a31" containerName="extract-utilities" Dec 13 06:43:27 crc kubenswrapper[5048]: I1213 06:43:27.218509 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8549450-c793-4fca-9f52-d7650d704a31" containerName="extract-utilities" Dec 13 06:43:27 crc kubenswrapper[5048]: I1213 06:43:27.218649 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8549450-c793-4fca-9f52-d7650d704a31" containerName="registry-server" Dec 13 06:43:27 crc kubenswrapper[5048]: I1213 06:43:27.219669 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6" Dec 13 06:43:27 crc kubenswrapper[5048]: I1213 06:43:27.224725 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Dec 13 06:43:27 crc kubenswrapper[5048]: I1213 06:43:27.225786 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6"] Dec 13 06:43:27 crc kubenswrapper[5048]: I1213 06:43:27.294698 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fe828039-1927-4005-a732-ce1cb2fb898c-util\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6\" (UID: \"fe828039-1927-4005-a732-ce1cb2fb898c\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6" Dec 13 06:43:27 crc kubenswrapper[5048]: I1213 06:43:27.294744 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/fe828039-1927-4005-a732-ce1cb2fb898c-bundle\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6\" (UID: \"fe828039-1927-4005-a732-ce1cb2fb898c\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6" Dec 13 06:43:27 crc kubenswrapper[5048]: I1213 06:43:27.294774 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t96rq\" (UniqueName: \"kubernetes.io/projected/fe828039-1927-4005-a732-ce1cb2fb898c-kube-api-access-t96rq\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6\" (UID: \"fe828039-1927-4005-a732-ce1cb2fb898c\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6" Dec 13 06:43:27 crc kubenswrapper[5048]: I1213 06:43:27.396548 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/fe828039-1927-4005-a732-ce1cb2fb898c-bundle\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6\" (UID: \"fe828039-1927-4005-a732-ce1cb2fb898c\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6" Dec 13 06:43:27 crc kubenswrapper[5048]: I1213 06:43:27.397014 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/fe828039-1927-4005-a732-ce1cb2fb898c-bundle\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6\" (UID: \"fe828039-1927-4005-a732-ce1cb2fb898c\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6" Dec 13 06:43:27 crc kubenswrapper[5048]: I1213 06:43:27.397021 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fe828039-1927-4005-a732-ce1cb2fb898c-util\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6\" (UID: \"fe828039-1927-4005-a732-ce1cb2fb898c\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6" Dec 13 06:43:27 crc kubenswrapper[5048]: I1213 06:43:27.397059 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t96rq\" (UniqueName: 
\"kubernetes.io/projected/fe828039-1927-4005-a732-ce1cb2fb898c-kube-api-access-t96rq\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6\" (UID: \"fe828039-1927-4005-a732-ce1cb2fb898c\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6" Dec 13 06:43:27 crc kubenswrapper[5048]: I1213 06:43:27.397308 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fe828039-1927-4005-a732-ce1cb2fb898c-util\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6\" (UID: \"fe828039-1927-4005-a732-ce1cb2fb898c\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6" Dec 13 06:43:27 crc kubenswrapper[5048]: I1213 06:43:27.420270 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t96rq\" (UniqueName: \"kubernetes.io/projected/fe828039-1927-4005-a732-ce1cb2fb898c-kube-api-access-t96rq\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6\" (UID: \"fe828039-1927-4005-a732-ce1cb2fb898c\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6" Dec 13 06:43:27 crc kubenswrapper[5048]: I1213 06:43:27.536613 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6" Dec 13 06:43:27 crc kubenswrapper[5048]: I1213 06:43:27.727484 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6"] Dec 13 06:43:27 crc kubenswrapper[5048]: W1213 06:43:27.734543 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe828039_1927_4005_a732_ce1cb2fb898c.slice/crio-2e7a9463b8f1eb6d7ef9081ac0abf7733fdb00125f6616eacb351520915da63e WatchSource:0}: Error finding container 2e7a9463b8f1eb6d7ef9081ac0abf7733fdb00125f6616eacb351520915da63e: Status 404 returned error can't find the container with id 2e7a9463b8f1eb6d7ef9081ac0abf7733fdb00125f6616eacb351520915da63e Dec 13 06:43:28 crc kubenswrapper[5048]: I1213 06:43:28.551922 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6" event={"ID":"fe828039-1927-4005-a732-ce1cb2fb898c","Type":"ContainerStarted","Data":"59078ce0203c0d26861ee9439ed919ea61e54743db38dd93e7459fd655fe104c"} Dec 13 06:43:28 crc kubenswrapper[5048]: I1213 06:43:28.552157 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6" event={"ID":"fe828039-1927-4005-a732-ce1cb2fb898c","Type":"ContainerStarted","Data":"2e7a9463b8f1eb6d7ef9081ac0abf7733fdb00125f6616eacb351520915da63e"} Dec 13 06:43:29 crc kubenswrapper[5048]: I1213 06:43:29.561497 5048 generic.go:334] "Generic (PLEG): container finished" podID="fe828039-1927-4005-a732-ce1cb2fb898c" containerID="59078ce0203c0d26861ee9439ed919ea61e54743db38dd93e7459fd655fe104c" exitCode=0 Dec 13 06:43:29 crc kubenswrapper[5048]: I1213 06:43:29.561775 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6" event={"ID":"fe828039-1927-4005-a732-ce1cb2fb898c","Type":"ContainerDied","Data":"59078ce0203c0d26861ee9439ed919ea61e54743db38dd93e7459fd655fe104c"} Dec 13 
06:43:30 crc kubenswrapper[5048]: I1213 06:43:30.514215 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-5nt4n" podUID="1de0ebfd-b283-4790-badb-fb78d80e6703" containerName="console" containerID="cri-o://93370343880a95b30a6b8d074b79fd64176354922c3f0aa562093d29ff66ec23" gracePeriod=15 Dec 13 06:43:30 crc kubenswrapper[5048]: I1213 06:43:30.847567 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-5nt4n_1de0ebfd-b283-4790-badb-fb78d80e6703/console/0.log" Dec 13 06:43:30 crc kubenswrapper[5048]: I1213 06:43:30.847629 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-5nt4n" Dec 13 06:43:30 crc kubenswrapper[5048]: I1213 06:43:30.942529 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1de0ebfd-b283-4790-badb-fb78d80e6703-oauth-serving-cert\") pod \"1de0ebfd-b283-4790-badb-fb78d80e6703\" (UID: \"1de0ebfd-b283-4790-badb-fb78d80e6703\") " Dec 13 06:43:30 crc kubenswrapper[5048]: I1213 06:43:30.942919 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zn766\" (UniqueName: \"kubernetes.io/projected/1de0ebfd-b283-4790-badb-fb78d80e6703-kube-api-access-zn766\") pod \"1de0ebfd-b283-4790-badb-fb78d80e6703\" (UID: \"1de0ebfd-b283-4790-badb-fb78d80e6703\") " Dec 13 06:43:30 crc kubenswrapper[5048]: I1213 06:43:30.942955 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1de0ebfd-b283-4790-badb-fb78d80e6703-service-ca\") pod \"1de0ebfd-b283-4790-badb-fb78d80e6703\" (UID: \"1de0ebfd-b283-4790-badb-fb78d80e6703\") " Dec 13 06:43:30 crc kubenswrapper[5048]: I1213 06:43:30.942987 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1de0ebfd-b283-4790-badb-fb78d80e6703-trusted-ca-bundle\") pod \"1de0ebfd-b283-4790-badb-fb78d80e6703\" (UID: \"1de0ebfd-b283-4790-badb-fb78d80e6703\") " Dec 13 06:43:30 crc kubenswrapper[5048]: I1213 06:43:30.943025 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1de0ebfd-b283-4790-badb-fb78d80e6703-console-oauth-config\") pod \"1de0ebfd-b283-4790-badb-fb78d80e6703\" (UID: \"1de0ebfd-b283-4790-badb-fb78d80e6703\") " Dec 13 06:43:30 crc kubenswrapper[5048]: I1213 06:43:30.943051 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1de0ebfd-b283-4790-badb-fb78d80e6703-console-config\") pod \"1de0ebfd-b283-4790-badb-fb78d80e6703\" (UID: \"1de0ebfd-b283-4790-badb-fb78d80e6703\") " Dec 13 06:43:30 crc kubenswrapper[5048]: I1213 06:43:30.943085 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1de0ebfd-b283-4790-badb-fb78d80e6703-console-serving-cert\") pod \"1de0ebfd-b283-4790-badb-fb78d80e6703\" (UID: \"1de0ebfd-b283-4790-badb-fb78d80e6703\") " Dec 13 06:43:30 crc kubenswrapper[5048]: I1213 06:43:30.943409 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1de0ebfd-b283-4790-badb-fb78d80e6703-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod 
"1de0ebfd-b283-4790-badb-fb78d80e6703" (UID: "1de0ebfd-b283-4790-badb-fb78d80e6703"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:43:30 crc kubenswrapper[5048]: I1213 06:43:30.943473 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1de0ebfd-b283-4790-badb-fb78d80e6703-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1de0ebfd-b283-4790-badb-fb78d80e6703" (UID: "1de0ebfd-b283-4790-badb-fb78d80e6703"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:43:30 crc kubenswrapper[5048]: I1213 06:43:30.943941 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1de0ebfd-b283-4790-badb-fb78d80e6703-service-ca" (OuterVolumeSpecName: "service-ca") pod "1de0ebfd-b283-4790-badb-fb78d80e6703" (UID: "1de0ebfd-b283-4790-badb-fb78d80e6703"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:43:30 crc kubenswrapper[5048]: I1213 06:43:30.943970 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1de0ebfd-b283-4790-badb-fb78d80e6703-console-config" (OuterVolumeSpecName: "console-config") pod "1de0ebfd-b283-4790-badb-fb78d80e6703" (UID: "1de0ebfd-b283-4790-badb-fb78d80e6703"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:43:30 crc kubenswrapper[5048]: I1213 06:43:30.948642 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1de0ebfd-b283-4790-badb-fb78d80e6703-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "1de0ebfd-b283-4790-badb-fb78d80e6703" (UID: "1de0ebfd-b283-4790-badb-fb78d80e6703"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:43:30 crc kubenswrapper[5048]: I1213 06:43:30.949029 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1de0ebfd-b283-4790-badb-fb78d80e6703-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "1de0ebfd-b283-4790-badb-fb78d80e6703" (UID: "1de0ebfd-b283-4790-badb-fb78d80e6703"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:43:30 crc kubenswrapper[5048]: I1213 06:43:30.949748 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1de0ebfd-b283-4790-badb-fb78d80e6703-kube-api-access-zn766" (OuterVolumeSpecName: "kube-api-access-zn766") pod "1de0ebfd-b283-4790-badb-fb78d80e6703" (UID: "1de0ebfd-b283-4790-badb-fb78d80e6703"). InnerVolumeSpecName "kube-api-access-zn766". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:43:31 crc kubenswrapper[5048]: I1213 06:43:31.044900 5048 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1de0ebfd-b283-4790-badb-fb78d80e6703-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 13 06:43:31 crc kubenswrapper[5048]: I1213 06:43:31.044935 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zn766\" (UniqueName: \"kubernetes.io/projected/1de0ebfd-b283-4790-badb-fb78d80e6703-kube-api-access-zn766\") on node \"crc\" DevicePath \"\"" Dec 13 06:43:31 crc kubenswrapper[5048]: I1213 06:43:31.044947 5048 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1de0ebfd-b283-4790-badb-fb78d80e6703-service-ca\") on node \"crc\" DevicePath \"\"" Dec 13 06:43:31 crc kubenswrapper[5048]: I1213 06:43:31.044955 5048 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1de0ebfd-b283-4790-badb-fb78d80e6703-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 06:43:31 crc kubenswrapper[5048]: I1213 06:43:31.044964 5048 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1de0ebfd-b283-4790-badb-fb78d80e6703-console-oauth-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:43:31 crc kubenswrapper[5048]: I1213 06:43:31.044972 5048 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1de0ebfd-b283-4790-badb-fb78d80e6703-console-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:43:31 crc kubenswrapper[5048]: I1213 06:43:31.044980 5048 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1de0ebfd-b283-4790-badb-fb78d80e6703-console-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 13 06:43:31 crc kubenswrapper[5048]: I1213 06:43:31.578907 5048 generic.go:334] "Generic (PLEG): container finished" podID="fe828039-1927-4005-a732-ce1cb2fb898c" containerID="dce485d08f01db10a2432f1376d0e108302c783cdf77c2beccf22c134d529102" exitCode=0 Dec 13 06:43:31 crc kubenswrapper[5048]: I1213 06:43:31.579004 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6" event={"ID":"fe828039-1927-4005-a732-ce1cb2fb898c","Type":"ContainerDied","Data":"dce485d08f01db10a2432f1376d0e108302c783cdf77c2beccf22c134d529102"} Dec 13 06:43:31 crc kubenswrapper[5048]: I1213 06:43:31.580627 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-5nt4n_1de0ebfd-b283-4790-badb-fb78d80e6703/console/0.log" Dec 13 06:43:31 crc kubenswrapper[5048]: I1213 06:43:31.580660 5048 generic.go:334] "Generic (PLEG): container finished" podID="1de0ebfd-b283-4790-badb-fb78d80e6703" containerID="93370343880a95b30a6b8d074b79fd64176354922c3f0aa562093d29ff66ec23" exitCode=2 Dec 13 06:43:31 crc kubenswrapper[5048]: I1213 06:43:31.580683 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-5nt4n" event={"ID":"1de0ebfd-b283-4790-badb-fb78d80e6703","Type":"ContainerDied","Data":"93370343880a95b30a6b8d074b79fd64176354922c3f0aa562093d29ff66ec23"} Dec 13 06:43:31 crc kubenswrapper[5048]: I1213 06:43:31.580708 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-5nt4n" 
event={"ID":"1de0ebfd-b283-4790-badb-fb78d80e6703","Type":"ContainerDied","Data":"fa1086991ce8c29dd86f25da1668d167842b5855915f0f245a829ac74751b99d"} Dec 13 06:43:31 crc kubenswrapper[5048]: I1213 06:43:31.580728 5048 scope.go:117] "RemoveContainer" containerID="93370343880a95b30a6b8d074b79fd64176354922c3f0aa562093d29ff66ec23" Dec 13 06:43:31 crc kubenswrapper[5048]: I1213 06:43:31.580751 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-5nt4n" Dec 13 06:43:31 crc kubenswrapper[5048]: I1213 06:43:31.608603 5048 scope.go:117] "RemoveContainer" containerID="93370343880a95b30a6b8d074b79fd64176354922c3f0aa562093d29ff66ec23" Dec 13 06:43:31 crc kubenswrapper[5048]: E1213 06:43:31.609209 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"93370343880a95b30a6b8d074b79fd64176354922c3f0aa562093d29ff66ec23\": container with ID starting with 93370343880a95b30a6b8d074b79fd64176354922c3f0aa562093d29ff66ec23 not found: ID does not exist" containerID="93370343880a95b30a6b8d074b79fd64176354922c3f0aa562093d29ff66ec23" Dec 13 06:43:31 crc kubenswrapper[5048]: I1213 06:43:31.609244 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"93370343880a95b30a6b8d074b79fd64176354922c3f0aa562093d29ff66ec23"} err="failed to get container status \"93370343880a95b30a6b8d074b79fd64176354922c3f0aa562093d29ff66ec23\": rpc error: code = NotFound desc = could not find container \"93370343880a95b30a6b8d074b79fd64176354922c3f0aa562093d29ff66ec23\": container with ID starting with 93370343880a95b30a6b8d074b79fd64176354922c3f0aa562093d29ff66ec23 not found: ID does not exist" Dec 13 06:43:31 crc kubenswrapper[5048]: I1213 06:43:31.619863 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-5nt4n"] Dec 13 06:43:31 crc kubenswrapper[5048]: I1213 06:43:31.625508 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-5nt4n"] Dec 13 06:43:32 crc kubenswrapper[5048]: I1213 06:43:32.576616 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1de0ebfd-b283-4790-badb-fb78d80e6703" path="/var/lib/kubelet/pods/1de0ebfd-b283-4790-badb-fb78d80e6703/volumes" Dec 13 06:43:32 crc kubenswrapper[5048]: I1213 06:43:32.591895 5048 generic.go:334] "Generic (PLEG): container finished" podID="fe828039-1927-4005-a732-ce1cb2fb898c" containerID="a6fd58180f5d20ef17d989a49d288d869943d83986b89461909c52aca0e49e50" exitCode=0 Dec 13 06:43:32 crc kubenswrapper[5048]: I1213 06:43:32.591951 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6" event={"ID":"fe828039-1927-4005-a732-ce1cb2fb898c","Type":"ContainerDied","Data":"a6fd58180f5d20ef17d989a49d288d869943d83986b89461909c52aca0e49e50"} Dec 13 06:43:33 crc kubenswrapper[5048]: I1213 06:43:33.825262 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6" Dec 13 06:43:33 crc kubenswrapper[5048]: I1213 06:43:33.882238 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fe828039-1927-4005-a732-ce1cb2fb898c-util\") pod \"fe828039-1927-4005-a732-ce1cb2fb898c\" (UID: \"fe828039-1927-4005-a732-ce1cb2fb898c\") " Dec 13 06:43:33 crc kubenswrapper[5048]: I1213 06:43:33.882321 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t96rq\" (UniqueName: \"kubernetes.io/projected/fe828039-1927-4005-a732-ce1cb2fb898c-kube-api-access-t96rq\") pod \"fe828039-1927-4005-a732-ce1cb2fb898c\" (UID: \"fe828039-1927-4005-a732-ce1cb2fb898c\") " Dec 13 06:43:33 crc kubenswrapper[5048]: I1213 06:43:33.882382 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/fe828039-1927-4005-a732-ce1cb2fb898c-bundle\") pod \"fe828039-1927-4005-a732-ce1cb2fb898c\" (UID: \"fe828039-1927-4005-a732-ce1cb2fb898c\") " Dec 13 06:43:33 crc kubenswrapper[5048]: I1213 06:43:33.883761 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fe828039-1927-4005-a732-ce1cb2fb898c-bundle" (OuterVolumeSpecName: "bundle") pod "fe828039-1927-4005-a732-ce1cb2fb898c" (UID: "fe828039-1927-4005-a732-ce1cb2fb898c"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:43:33 crc kubenswrapper[5048]: I1213 06:43:33.887594 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fe828039-1927-4005-a732-ce1cb2fb898c-kube-api-access-t96rq" (OuterVolumeSpecName: "kube-api-access-t96rq") pod "fe828039-1927-4005-a732-ce1cb2fb898c" (UID: "fe828039-1927-4005-a732-ce1cb2fb898c"). InnerVolumeSpecName "kube-api-access-t96rq". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:43:33 crc kubenswrapper[5048]: I1213 06:43:33.894661 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fe828039-1927-4005-a732-ce1cb2fb898c-util" (OuterVolumeSpecName: "util") pod "fe828039-1927-4005-a732-ce1cb2fb898c" (UID: "fe828039-1927-4005-a732-ce1cb2fb898c"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:43:33 crc kubenswrapper[5048]: I1213 06:43:33.983627 5048 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fe828039-1927-4005-a732-ce1cb2fb898c-util\") on node \"crc\" DevicePath \"\"" Dec 13 06:43:33 crc kubenswrapper[5048]: I1213 06:43:33.983681 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t96rq\" (UniqueName: \"kubernetes.io/projected/fe828039-1927-4005-a732-ce1cb2fb898c-kube-api-access-t96rq\") on node \"crc\" DevicePath \"\"" Dec 13 06:43:33 crc kubenswrapper[5048]: I1213 06:43:33.983698 5048 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/fe828039-1927-4005-a732-ce1cb2fb898c-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 06:43:34 crc kubenswrapper[5048]: I1213 06:43:34.606660 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6" event={"ID":"fe828039-1927-4005-a732-ce1cb2fb898c","Type":"ContainerDied","Data":"2e7a9463b8f1eb6d7ef9081ac0abf7733fdb00125f6616eacb351520915da63e"} Dec 13 06:43:34 crc kubenswrapper[5048]: I1213 06:43:34.606710 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2e7a9463b8f1eb6d7ef9081ac0abf7733fdb00125f6616eacb351520915da63e" Dec 13 06:43:34 crc kubenswrapper[5048]: I1213 06:43:34.606712 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6" Dec 13 06:43:42 crc kubenswrapper[5048]: I1213 06:43:42.515298 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-955d4d9d6-6w2dd"] Dec 13 06:43:42 crc kubenswrapper[5048]: E1213 06:43:42.515951 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe828039-1927-4005-a732-ce1cb2fb898c" containerName="util" Dec 13 06:43:42 crc kubenswrapper[5048]: I1213 06:43:42.515962 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe828039-1927-4005-a732-ce1cb2fb898c" containerName="util" Dec 13 06:43:42 crc kubenswrapper[5048]: E1213 06:43:42.515969 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe828039-1927-4005-a732-ce1cb2fb898c" containerName="pull" Dec 13 06:43:42 crc kubenswrapper[5048]: I1213 06:43:42.515975 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe828039-1927-4005-a732-ce1cb2fb898c" containerName="pull" Dec 13 06:43:42 crc kubenswrapper[5048]: E1213 06:43:42.515982 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1de0ebfd-b283-4790-badb-fb78d80e6703" containerName="console" Dec 13 06:43:42 crc kubenswrapper[5048]: I1213 06:43:42.515988 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="1de0ebfd-b283-4790-badb-fb78d80e6703" containerName="console" Dec 13 06:43:42 crc kubenswrapper[5048]: E1213 06:43:42.516000 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe828039-1927-4005-a732-ce1cb2fb898c" containerName="extract" Dec 13 06:43:42 crc kubenswrapper[5048]: I1213 06:43:42.516007 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe828039-1927-4005-a732-ce1cb2fb898c" containerName="extract" Dec 13 06:43:42 crc kubenswrapper[5048]: I1213 06:43:42.516109 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="1de0ebfd-b283-4790-badb-fb78d80e6703" containerName="console" Dec 13 
06:43:42 crc kubenswrapper[5048]: I1213 06:43:42.516126 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe828039-1927-4005-a732-ce1cb2fb898c" containerName="extract" Dec 13 06:43:42 crc kubenswrapper[5048]: I1213 06:43:42.516494 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-955d4d9d6-6w2dd" Dec 13 06:43:42 crc kubenswrapper[5048]: I1213 06:43:42.518981 5048 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Dec 13 06:43:42 crc kubenswrapper[5048]: I1213 06:43:42.522427 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Dec 13 06:43:42 crc kubenswrapper[5048]: I1213 06:43:42.522425 5048 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-mqhwf" Dec 13 06:43:42 crc kubenswrapper[5048]: I1213 06:43:42.522558 5048 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Dec 13 06:43:42 crc kubenswrapper[5048]: I1213 06:43:42.522583 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Dec 13 06:43:42 crc kubenswrapper[5048]: I1213 06:43:42.540537 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-955d4d9d6-6w2dd"] Dec 13 06:43:42 crc kubenswrapper[5048]: I1213 06:43:42.597190 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4hdqf\" (UniqueName: \"kubernetes.io/projected/320194d2-61c0-4149-9458-a880711d4edf-kube-api-access-4hdqf\") pod \"metallb-operator-controller-manager-955d4d9d6-6w2dd\" (UID: \"320194d2-61c0-4149-9458-a880711d4edf\") " pod="metallb-system/metallb-operator-controller-manager-955d4d9d6-6w2dd" Dec 13 06:43:42 crc kubenswrapper[5048]: I1213 06:43:42.597265 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/320194d2-61c0-4149-9458-a880711d4edf-apiservice-cert\") pod \"metallb-operator-controller-manager-955d4d9d6-6w2dd\" (UID: \"320194d2-61c0-4149-9458-a880711d4edf\") " pod="metallb-system/metallb-operator-controller-manager-955d4d9d6-6w2dd" Dec 13 06:43:42 crc kubenswrapper[5048]: I1213 06:43:42.597520 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/320194d2-61c0-4149-9458-a880711d4edf-webhook-cert\") pod \"metallb-operator-controller-manager-955d4d9d6-6w2dd\" (UID: \"320194d2-61c0-4149-9458-a880711d4edf\") " pod="metallb-system/metallb-operator-controller-manager-955d4d9d6-6w2dd" Dec 13 06:43:42 crc kubenswrapper[5048]: I1213 06:43:42.698699 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/320194d2-61c0-4149-9458-a880711d4edf-apiservice-cert\") pod \"metallb-operator-controller-manager-955d4d9d6-6w2dd\" (UID: \"320194d2-61c0-4149-9458-a880711d4edf\") " pod="metallb-system/metallb-operator-controller-manager-955d4d9d6-6w2dd" Dec 13 06:43:42 crc kubenswrapper[5048]: I1213 06:43:42.699044 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: 
\"kubernetes.io/secret/320194d2-61c0-4149-9458-a880711d4edf-webhook-cert\") pod \"metallb-operator-controller-manager-955d4d9d6-6w2dd\" (UID: \"320194d2-61c0-4149-9458-a880711d4edf\") " pod="metallb-system/metallb-operator-controller-manager-955d4d9d6-6w2dd" Dec 13 06:43:42 crc kubenswrapper[5048]: I1213 06:43:42.699079 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4hdqf\" (UniqueName: \"kubernetes.io/projected/320194d2-61c0-4149-9458-a880711d4edf-kube-api-access-4hdqf\") pod \"metallb-operator-controller-manager-955d4d9d6-6w2dd\" (UID: \"320194d2-61c0-4149-9458-a880711d4edf\") " pod="metallb-system/metallb-operator-controller-manager-955d4d9d6-6w2dd" Dec 13 06:43:42 crc kubenswrapper[5048]: I1213 06:43:42.704758 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/320194d2-61c0-4149-9458-a880711d4edf-apiservice-cert\") pod \"metallb-operator-controller-manager-955d4d9d6-6w2dd\" (UID: \"320194d2-61c0-4149-9458-a880711d4edf\") " pod="metallb-system/metallb-operator-controller-manager-955d4d9d6-6w2dd" Dec 13 06:43:42 crc kubenswrapper[5048]: I1213 06:43:42.717068 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/320194d2-61c0-4149-9458-a880711d4edf-webhook-cert\") pod \"metallb-operator-controller-manager-955d4d9d6-6w2dd\" (UID: \"320194d2-61c0-4149-9458-a880711d4edf\") " pod="metallb-system/metallb-operator-controller-manager-955d4d9d6-6w2dd" Dec 13 06:43:42 crc kubenswrapper[5048]: I1213 06:43:42.717561 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4hdqf\" (UniqueName: \"kubernetes.io/projected/320194d2-61c0-4149-9458-a880711d4edf-kube-api-access-4hdqf\") pod \"metallb-operator-controller-manager-955d4d9d6-6w2dd\" (UID: \"320194d2-61c0-4149-9458-a880711d4edf\") " pod="metallb-system/metallb-operator-controller-manager-955d4d9d6-6w2dd" Dec 13 06:43:42 crc kubenswrapper[5048]: I1213 06:43:42.836227 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-955d4d9d6-6w2dd" Dec 13 06:43:42 crc kubenswrapper[5048]: I1213 06:43:42.890880 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-56bb4c4c65-p9qls"] Dec 13 06:43:42 crc kubenswrapper[5048]: I1213 06:43:42.891755 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-56bb4c4c65-p9qls" Dec 13 06:43:42 crc kubenswrapper[5048]: I1213 06:43:42.894659 5048 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Dec 13 06:43:42 crc kubenswrapper[5048]: I1213 06:43:42.894659 5048 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Dec 13 06:43:42 crc kubenswrapper[5048]: I1213 06:43:42.894723 5048 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-bs7hn" Dec 13 06:43:42 crc kubenswrapper[5048]: I1213 06:43:42.912115 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-56bb4c4c65-p9qls"] Dec 13 06:43:43 crc kubenswrapper[5048]: I1213 06:43:43.004980 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0618bcc5-697d-4f5b-bbab-a84d868b5d32-apiservice-cert\") pod \"metallb-operator-webhook-server-56bb4c4c65-p9qls\" (UID: \"0618bcc5-697d-4f5b-bbab-a84d868b5d32\") " pod="metallb-system/metallb-operator-webhook-server-56bb4c4c65-p9qls" Dec 13 06:43:43 crc kubenswrapper[5048]: I1213 06:43:43.005356 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0618bcc5-697d-4f5b-bbab-a84d868b5d32-webhook-cert\") pod \"metallb-operator-webhook-server-56bb4c4c65-p9qls\" (UID: \"0618bcc5-697d-4f5b-bbab-a84d868b5d32\") " pod="metallb-system/metallb-operator-webhook-server-56bb4c4c65-p9qls" Dec 13 06:43:43 crc kubenswrapper[5048]: I1213 06:43:43.005401 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-skrhb\" (UniqueName: \"kubernetes.io/projected/0618bcc5-697d-4f5b-bbab-a84d868b5d32-kube-api-access-skrhb\") pod \"metallb-operator-webhook-server-56bb4c4c65-p9qls\" (UID: \"0618bcc5-697d-4f5b-bbab-a84d868b5d32\") " pod="metallb-system/metallb-operator-webhook-server-56bb4c4c65-p9qls" Dec 13 06:43:43 crc kubenswrapper[5048]: I1213 06:43:43.106706 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0618bcc5-697d-4f5b-bbab-a84d868b5d32-apiservice-cert\") pod \"metallb-operator-webhook-server-56bb4c4c65-p9qls\" (UID: \"0618bcc5-697d-4f5b-bbab-a84d868b5d32\") " pod="metallb-system/metallb-operator-webhook-server-56bb4c4c65-p9qls" Dec 13 06:43:43 crc kubenswrapper[5048]: I1213 06:43:43.106762 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0618bcc5-697d-4f5b-bbab-a84d868b5d32-webhook-cert\") pod \"metallb-operator-webhook-server-56bb4c4c65-p9qls\" (UID: \"0618bcc5-697d-4f5b-bbab-a84d868b5d32\") " pod="metallb-system/metallb-operator-webhook-server-56bb4c4c65-p9qls" Dec 13 06:43:43 crc kubenswrapper[5048]: I1213 06:43:43.106796 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-skrhb\" (UniqueName: \"kubernetes.io/projected/0618bcc5-697d-4f5b-bbab-a84d868b5d32-kube-api-access-skrhb\") pod \"metallb-operator-webhook-server-56bb4c4c65-p9qls\" (UID: \"0618bcc5-697d-4f5b-bbab-a84d868b5d32\") " pod="metallb-system/metallb-operator-webhook-server-56bb4c4c65-p9qls" Dec 13 06:43:43 crc kubenswrapper[5048]: I1213 
06:43:43.117021 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0618bcc5-697d-4f5b-bbab-a84d868b5d32-apiservice-cert\") pod \"metallb-operator-webhook-server-56bb4c4c65-p9qls\" (UID: \"0618bcc5-697d-4f5b-bbab-a84d868b5d32\") " pod="metallb-system/metallb-operator-webhook-server-56bb4c4c65-p9qls" Dec 13 06:43:43 crc kubenswrapper[5048]: I1213 06:43:43.121986 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-955d4d9d6-6w2dd"] Dec 13 06:43:43 crc kubenswrapper[5048]: I1213 06:43:43.122807 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0618bcc5-697d-4f5b-bbab-a84d868b5d32-webhook-cert\") pod \"metallb-operator-webhook-server-56bb4c4c65-p9qls\" (UID: \"0618bcc5-697d-4f5b-bbab-a84d868b5d32\") " pod="metallb-system/metallb-operator-webhook-server-56bb4c4c65-p9qls" Dec 13 06:43:43 crc kubenswrapper[5048]: I1213 06:43:43.123013 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-skrhb\" (UniqueName: \"kubernetes.io/projected/0618bcc5-697d-4f5b-bbab-a84d868b5d32-kube-api-access-skrhb\") pod \"metallb-operator-webhook-server-56bb4c4c65-p9qls\" (UID: \"0618bcc5-697d-4f5b-bbab-a84d868b5d32\") " pod="metallb-system/metallb-operator-webhook-server-56bb4c4c65-p9qls" Dec 13 06:43:43 crc kubenswrapper[5048]: I1213 06:43:43.210199 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-56bb4c4c65-p9qls" Dec 13 06:43:43 crc kubenswrapper[5048]: I1213 06:43:43.431237 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-56bb4c4c65-p9qls"] Dec 13 06:43:43 crc kubenswrapper[5048]: W1213 06:43:43.446570 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0618bcc5_697d_4f5b_bbab_a84d868b5d32.slice/crio-ec47beaa2224b7fcbc48d6f37bad999267958aba5439389c629e94713cd657ae WatchSource:0}: Error finding container ec47beaa2224b7fcbc48d6f37bad999267958aba5439389c629e94713cd657ae: Status 404 returned error can't find the container with id ec47beaa2224b7fcbc48d6f37bad999267958aba5439389c629e94713cd657ae Dec 13 06:43:43 crc kubenswrapper[5048]: I1213 06:43:43.666397 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-955d4d9d6-6w2dd" event={"ID":"320194d2-61c0-4149-9458-a880711d4edf","Type":"ContainerStarted","Data":"c41c3f47b112c1e9868d44b22b0eca056e182f248d23685d3600e1557587ca8f"} Dec 13 06:43:43 crc kubenswrapper[5048]: I1213 06:43:43.667483 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-56bb4c4c65-p9qls" event={"ID":"0618bcc5-697d-4f5b-bbab-a84d868b5d32","Type":"ContainerStarted","Data":"ec47beaa2224b7fcbc48d6f37bad999267958aba5439389c629e94713cd657ae"} Dec 13 06:43:46 crc kubenswrapper[5048]: I1213 06:43:46.686694 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-955d4d9d6-6w2dd" event={"ID":"320194d2-61c0-4149-9458-a880711d4edf","Type":"ContainerStarted","Data":"77168ca383d63bfc0ac015b0f34d64d73dcfb7036d0044d903c501a5dfb56385"} Dec 13 06:43:46 crc kubenswrapper[5048]: I1213 06:43:46.687197 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="metallb-system/metallb-operator-controller-manager-955d4d9d6-6w2dd" Dec 13 06:43:46 crc kubenswrapper[5048]: I1213 06:43:46.707357 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-955d4d9d6-6w2dd" podStartSLOduration=2.085456869 podStartE2EDuration="4.70733802s" podCreationTimestamp="2025-12-13 06:43:42 +0000 UTC" firstStartedPulling="2025-12-13 06:43:43.135135431 +0000 UTC m=+857.001730012" lastFinishedPulling="2025-12-13 06:43:45.757016582 +0000 UTC m=+859.623611163" observedRunningTime="2025-12-13 06:43:46.705067432 +0000 UTC m=+860.571662023" watchObservedRunningTime="2025-12-13 06:43:46.70733802 +0000 UTC m=+860.573932601" Dec 13 06:43:48 crc kubenswrapper[5048]: I1213 06:43:48.698225 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-56bb4c4c65-p9qls" event={"ID":"0618bcc5-697d-4f5b-bbab-a84d868b5d32","Type":"ContainerStarted","Data":"a9256653cb7e9f6c1f0d6edd1ce9e0b082a561a9b18ce2f88bfa327dc35b04bb"} Dec 13 06:43:48 crc kubenswrapper[5048]: I1213 06:43:48.698556 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-56bb4c4c65-p9qls" Dec 13 06:43:48 crc kubenswrapper[5048]: I1213 06:43:48.720364 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-56bb4c4c65-p9qls" podStartSLOduration=2.458222345 podStartE2EDuration="6.720344398s" podCreationTimestamp="2025-12-13 06:43:42 +0000 UTC" firstStartedPulling="2025-12-13 06:43:43.448598391 +0000 UTC m=+857.315192972" lastFinishedPulling="2025-12-13 06:43:47.710720444 +0000 UTC m=+861.577315025" observedRunningTime="2025-12-13 06:43:48.716961801 +0000 UTC m=+862.583556402" watchObservedRunningTime="2025-12-13 06:43:48.720344398 +0000 UTC m=+862.586938999" Dec 13 06:44:03 crc kubenswrapper[5048]: I1213 06:44:03.216811 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-56bb4c4c65-p9qls" Dec 13 06:44:15 crc kubenswrapper[5048]: I1213 06:44:15.669922 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-9fx7g"] Dec 13 06:44:15 crc kubenswrapper[5048]: I1213 06:44:15.671596 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-9fx7g" Dec 13 06:44:15 crc kubenswrapper[5048]: I1213 06:44:15.683889 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9fx7g"] Dec 13 06:44:15 crc kubenswrapper[5048]: I1213 06:44:15.763261 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5-catalog-content\") pod \"community-operators-9fx7g\" (UID: \"cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5\") " pod="openshift-marketplace/community-operators-9fx7g" Dec 13 06:44:15 crc kubenswrapper[5048]: I1213 06:44:15.763347 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5-utilities\") pod \"community-operators-9fx7g\" (UID: \"cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5\") " pod="openshift-marketplace/community-operators-9fx7g" Dec 13 06:44:15 crc kubenswrapper[5048]: I1213 06:44:15.763402 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f2bzg\" (UniqueName: \"kubernetes.io/projected/cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5-kube-api-access-f2bzg\") pod \"community-operators-9fx7g\" (UID: \"cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5\") " pod="openshift-marketplace/community-operators-9fx7g" Dec 13 06:44:15 crc kubenswrapper[5048]: I1213 06:44:15.864981 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5-utilities\") pod \"community-operators-9fx7g\" (UID: \"cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5\") " pod="openshift-marketplace/community-operators-9fx7g" Dec 13 06:44:15 crc kubenswrapper[5048]: I1213 06:44:15.865044 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f2bzg\" (UniqueName: \"kubernetes.io/projected/cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5-kube-api-access-f2bzg\") pod \"community-operators-9fx7g\" (UID: \"cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5\") " pod="openshift-marketplace/community-operators-9fx7g" Dec 13 06:44:15 crc kubenswrapper[5048]: I1213 06:44:15.865097 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5-catalog-content\") pod \"community-operators-9fx7g\" (UID: \"cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5\") " pod="openshift-marketplace/community-operators-9fx7g" Dec 13 06:44:15 crc kubenswrapper[5048]: I1213 06:44:15.865687 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5-catalog-content\") pod \"community-operators-9fx7g\" (UID: \"cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5\") " pod="openshift-marketplace/community-operators-9fx7g" Dec 13 06:44:15 crc kubenswrapper[5048]: I1213 06:44:15.865689 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5-utilities\") pod \"community-operators-9fx7g\" (UID: \"cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5\") " pod="openshift-marketplace/community-operators-9fx7g" Dec 13 06:44:15 crc kubenswrapper[5048]: I1213 06:44:15.885354 5048 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-f2bzg\" (UniqueName: \"kubernetes.io/projected/cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5-kube-api-access-f2bzg\") pod \"community-operators-9fx7g\" (UID: \"cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5\") " pod="openshift-marketplace/community-operators-9fx7g" Dec 13 06:44:15 crc kubenswrapper[5048]: I1213 06:44:15.991128 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9fx7g" Dec 13 06:44:16 crc kubenswrapper[5048]: I1213 06:44:16.470408 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9fx7g"] Dec 13 06:44:16 crc kubenswrapper[5048]: I1213 06:44:16.857028 5048 generic.go:334] "Generic (PLEG): container finished" podID="cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5" containerID="426c4e10ca01e7399bd6acbe8944e12ddb6621d979b92830bb47fb8dfc808ab2" exitCode=0 Dec 13 06:44:16 crc kubenswrapper[5048]: I1213 06:44:16.857179 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9fx7g" event={"ID":"cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5","Type":"ContainerDied","Data":"426c4e10ca01e7399bd6acbe8944e12ddb6621d979b92830bb47fb8dfc808ab2"} Dec 13 06:44:16 crc kubenswrapper[5048]: I1213 06:44:16.857285 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9fx7g" event={"ID":"cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5","Type":"ContainerStarted","Data":"7f2eabfc6578c99acae3428f47f33f736fd37eeb0671c9c1f768573ebbdbf1ca"} Dec 13 06:44:18 crc kubenswrapper[5048]: I1213 06:44:18.869089 5048 generic.go:334] "Generic (PLEG): container finished" podID="cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5" containerID="d92f6dc7fcf9e6b4b165f5a0d908dccaa3fb46aefd28e4347cbea2ab50434712" exitCode=0 Dec 13 06:44:18 crc kubenswrapper[5048]: I1213 06:44:18.869130 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9fx7g" event={"ID":"cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5","Type":"ContainerDied","Data":"d92f6dc7fcf9e6b4b165f5a0d908dccaa3fb46aefd28e4347cbea2ab50434712"} Dec 13 06:44:19 crc kubenswrapper[5048]: I1213 06:44:19.879247 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9fx7g" event={"ID":"cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5","Type":"ContainerStarted","Data":"ad9b1e7417fe56eb3d4a8e590262853ea0f6b157c17d803f1f912af1bfda5bda"} Dec 13 06:44:19 crc kubenswrapper[5048]: I1213 06:44:19.908375 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-9fx7g" podStartSLOduration=2.402072864 podStartE2EDuration="4.908356794s" podCreationTimestamp="2025-12-13 06:44:15 +0000 UTC" firstStartedPulling="2025-12-13 06:44:16.858817171 +0000 UTC m=+890.725411752" lastFinishedPulling="2025-12-13 06:44:19.365101101 +0000 UTC m=+893.231695682" observedRunningTime="2025-12-13 06:44:19.904230107 +0000 UTC m=+893.770824708" watchObservedRunningTime="2025-12-13 06:44:19.908356794 +0000 UTC m=+893.774951375" Dec 13 06:44:22 crc kubenswrapper[5048]: I1213 06:44:22.839992 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-955d4d9d6-6w2dd" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.313940 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-lhdr4"] Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.315022 5048 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lhdr4" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.328835 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lhdr4"] Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.456173 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66a815bb-5ba1-4af1-a3de-45df9ae28539-utilities\") pod \"redhat-marketplace-lhdr4\" (UID: \"66a815bb-5ba1-4af1-a3de-45df9ae28539\") " pod="openshift-marketplace/redhat-marketplace-lhdr4" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.456241 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dkl77\" (UniqueName: \"kubernetes.io/projected/66a815bb-5ba1-4af1-a3de-45df9ae28539-kube-api-access-dkl77\") pod \"redhat-marketplace-lhdr4\" (UID: \"66a815bb-5ba1-4af1-a3de-45df9ae28539\") " pod="openshift-marketplace/redhat-marketplace-lhdr4" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.456318 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66a815bb-5ba1-4af1-a3de-45df9ae28539-catalog-content\") pod \"redhat-marketplace-lhdr4\" (UID: \"66a815bb-5ba1-4af1-a3de-45df9ae28539\") " pod="openshift-marketplace/redhat-marketplace-lhdr4" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.557619 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dkl77\" (UniqueName: \"kubernetes.io/projected/66a815bb-5ba1-4af1-a3de-45df9ae28539-kube-api-access-dkl77\") pod \"redhat-marketplace-lhdr4\" (UID: \"66a815bb-5ba1-4af1-a3de-45df9ae28539\") " pod="openshift-marketplace/redhat-marketplace-lhdr4" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.558014 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66a815bb-5ba1-4af1-a3de-45df9ae28539-catalog-content\") pod \"redhat-marketplace-lhdr4\" (UID: \"66a815bb-5ba1-4af1-a3de-45df9ae28539\") " pod="openshift-marketplace/redhat-marketplace-lhdr4" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.558063 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66a815bb-5ba1-4af1-a3de-45df9ae28539-utilities\") pod \"redhat-marketplace-lhdr4\" (UID: \"66a815bb-5ba1-4af1-a3de-45df9ae28539\") " pod="openshift-marketplace/redhat-marketplace-lhdr4" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.558605 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66a815bb-5ba1-4af1-a3de-45df9ae28539-catalog-content\") pod \"redhat-marketplace-lhdr4\" (UID: \"66a815bb-5ba1-4af1-a3de-45df9ae28539\") " pod="openshift-marketplace/redhat-marketplace-lhdr4" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.558645 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66a815bb-5ba1-4af1-a3de-45df9ae28539-utilities\") pod \"redhat-marketplace-lhdr4\" (UID: \"66a815bb-5ba1-4af1-a3de-45df9ae28539\") " pod="openshift-marketplace/redhat-marketplace-lhdr4" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.587615 5048 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dkl77\" (UniqueName: \"kubernetes.io/projected/66a815bb-5ba1-4af1-a3de-45df9ae28539-kube-api-access-dkl77\") pod \"redhat-marketplace-lhdr4\" (UID: \"66a815bb-5ba1-4af1-a3de-45df9ae28539\") " pod="openshift-marketplace/redhat-marketplace-lhdr4" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.635561 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7784b6fcf-cwjgt"] Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.636109 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lhdr4" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.636511 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-cwjgt" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.639464 5048 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-vwk7l" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.639700 5048 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.643235 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-rm42c"] Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.645555 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-rm42c" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.650936 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7784b6fcf-cwjgt"] Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.653999 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.658971 5048 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.728820 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-fj6dj"] Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.730733 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-fj6dj" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.732955 5048 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.733327 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.733460 5048 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-smmk7" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.733579 5048 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.762843 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-5bddd4b946-x8wnx"] Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.763802 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-5bddd4b946-x8wnx" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.790021 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/fb943e2e-85ed-4508-ade6-d16343977d3d-cert\") pod \"frr-k8s-webhook-server-7784b6fcf-cwjgt\" (UID: \"fb943e2e-85ed-4508-ade6-d16343977d3d\") " pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-cwjgt" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.790127 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/b6da7b5a-20a7-4721-b245-c63202188d2f-frr-startup\") pod \"frr-k8s-rm42c\" (UID: \"b6da7b5a-20a7-4721-b245-c63202188d2f\") " pod="metallb-system/frr-k8s-rm42c" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.790164 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/b6da7b5a-20a7-4721-b245-c63202188d2f-metrics\") pod \"frr-k8s-rm42c\" (UID: \"b6da7b5a-20a7-4721-b245-c63202188d2f\") " pod="metallb-system/frr-k8s-rm42c" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.790204 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/b6da7b5a-20a7-4721-b245-c63202188d2f-reloader\") pod \"frr-k8s-rm42c\" (UID: \"b6da7b5a-20a7-4721-b245-c63202188d2f\") " pod="metallb-system/frr-k8s-rm42c" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.790236 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fmb88\" (UniqueName: \"kubernetes.io/projected/fb943e2e-85ed-4508-ade6-d16343977d3d-kube-api-access-fmb88\") pod \"frr-k8s-webhook-server-7784b6fcf-cwjgt\" (UID: \"fb943e2e-85ed-4508-ade6-d16343977d3d\") " pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-cwjgt" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.790291 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/b6da7b5a-20a7-4721-b245-c63202188d2f-frr-conf\") pod \"frr-k8s-rm42c\" (UID: \"b6da7b5a-20a7-4721-b245-c63202188d2f\") " pod="metallb-system/frr-k8s-rm42c" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.790320 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xgs4j\" (UniqueName: \"kubernetes.io/projected/b6da7b5a-20a7-4721-b245-c63202188d2f-kube-api-access-xgs4j\") pod \"frr-k8s-rm42c\" (UID: \"b6da7b5a-20a7-4721-b245-c63202188d2f\") " pod="metallb-system/frr-k8s-rm42c" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.790360 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/b6da7b5a-20a7-4721-b245-c63202188d2f-frr-sockets\") pod \"frr-k8s-rm42c\" (UID: \"b6da7b5a-20a7-4721-b245-c63202188d2f\") " pod="metallb-system/frr-k8s-rm42c" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.790385 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b6da7b5a-20a7-4721-b245-c63202188d2f-metrics-certs\") pod \"frr-k8s-rm42c\" (UID: \"b6da7b5a-20a7-4721-b245-c63202188d2f\") " 
pod="metallb-system/frr-k8s-rm42c" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.791384 5048 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.814960 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-5bddd4b946-x8wnx"] Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.892585 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/b6da7b5a-20a7-4721-b245-c63202188d2f-frr-conf\") pod \"frr-k8s-rm42c\" (UID: \"b6da7b5a-20a7-4721-b245-c63202188d2f\") " pod="metallb-system/frr-k8s-rm42c" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.892935 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xgs4j\" (UniqueName: \"kubernetes.io/projected/b6da7b5a-20a7-4721-b245-c63202188d2f-kube-api-access-xgs4j\") pod \"frr-k8s-rm42c\" (UID: \"b6da7b5a-20a7-4721-b245-c63202188d2f\") " pod="metallb-system/frr-k8s-rm42c" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.892964 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/b6da7b5a-20a7-4721-b245-c63202188d2f-frr-sockets\") pod \"frr-k8s-rm42c\" (UID: \"b6da7b5a-20a7-4721-b245-c63202188d2f\") " pod="metallb-system/frr-k8s-rm42c" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.892988 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b6da7b5a-20a7-4721-b245-c63202188d2f-metrics-certs\") pod \"frr-k8s-rm42c\" (UID: \"b6da7b5a-20a7-4721-b245-c63202188d2f\") " pod="metallb-system/frr-k8s-rm42c" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.893014 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/fb943e2e-85ed-4508-ade6-d16343977d3d-cert\") pod \"frr-k8s-webhook-server-7784b6fcf-cwjgt\" (UID: \"fb943e2e-85ed-4508-ade6-d16343977d3d\") " pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-cwjgt" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.893042 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2adbe773-2087-440f-bcdd-91ccb9eaa03f-metrics-certs\") pod \"controller-5bddd4b946-x8wnx\" (UID: \"2adbe773-2087-440f-bcdd-91ccb9eaa03f\") " pod="metallb-system/controller-5bddd4b946-x8wnx" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.893073 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cmb4k\" (UniqueName: \"kubernetes.io/projected/3c80ea24-095d-4c79-9328-2b3433da583c-kube-api-access-cmb4k\") pod \"speaker-fj6dj\" (UID: \"3c80ea24-095d-4c79-9328-2b3433da583c\") " pod="metallb-system/speaker-fj6dj" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.893100 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3c80ea24-095d-4c79-9328-2b3433da583c-metrics-certs\") pod \"speaker-fj6dj\" (UID: \"3c80ea24-095d-4c79-9328-2b3433da583c\") " pod="metallb-system/speaker-fj6dj" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.893139 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" 
(UniqueName: \"kubernetes.io/configmap/b6da7b5a-20a7-4721-b245-c63202188d2f-frr-startup\") pod \"frr-k8s-rm42c\" (UID: \"b6da7b5a-20a7-4721-b245-c63202188d2f\") " pod="metallb-system/frr-k8s-rm42c" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.893163 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dr45c\" (UniqueName: \"kubernetes.io/projected/2adbe773-2087-440f-bcdd-91ccb9eaa03f-kube-api-access-dr45c\") pod \"controller-5bddd4b946-x8wnx\" (UID: \"2adbe773-2087-440f-bcdd-91ccb9eaa03f\") " pod="metallb-system/controller-5bddd4b946-x8wnx" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.893186 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/b6da7b5a-20a7-4721-b245-c63202188d2f-metrics\") pod \"frr-k8s-rm42c\" (UID: \"b6da7b5a-20a7-4721-b245-c63202188d2f\") " pod="metallb-system/frr-k8s-rm42c" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.893216 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2adbe773-2087-440f-bcdd-91ccb9eaa03f-cert\") pod \"controller-5bddd4b946-x8wnx\" (UID: \"2adbe773-2087-440f-bcdd-91ccb9eaa03f\") " pod="metallb-system/controller-5bddd4b946-x8wnx" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.893240 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/b6da7b5a-20a7-4721-b245-c63202188d2f-reloader\") pod \"frr-k8s-rm42c\" (UID: \"b6da7b5a-20a7-4721-b245-c63202188d2f\") " pod="metallb-system/frr-k8s-rm42c" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.893264 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/3c80ea24-095d-4c79-9328-2b3433da583c-metallb-excludel2\") pod \"speaker-fj6dj\" (UID: \"3c80ea24-095d-4c79-9328-2b3433da583c\") " pod="metallb-system/speaker-fj6dj" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.893288 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/3c80ea24-095d-4c79-9328-2b3433da583c-memberlist\") pod \"speaker-fj6dj\" (UID: \"3c80ea24-095d-4c79-9328-2b3433da583c\") " pod="metallb-system/speaker-fj6dj" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.893315 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fmb88\" (UniqueName: \"kubernetes.io/projected/fb943e2e-85ed-4508-ade6-d16343977d3d-kube-api-access-fmb88\") pod \"frr-k8s-webhook-server-7784b6fcf-cwjgt\" (UID: \"fb943e2e-85ed-4508-ade6-d16343977d3d\") " pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-cwjgt" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.894203 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/b6da7b5a-20a7-4721-b245-c63202188d2f-frr-sockets\") pod \"frr-k8s-rm42c\" (UID: \"b6da7b5a-20a7-4721-b245-c63202188d2f\") " pod="metallb-system/frr-k8s-rm42c" Dec 13 06:44:23 crc kubenswrapper[5048]: E1213 06:44:23.894320 5048 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found Dec 13 06:44:23 crc kubenswrapper[5048]: E1213 06:44:23.894361 5048 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/secret/b6da7b5a-20a7-4721-b245-c63202188d2f-metrics-certs podName:b6da7b5a-20a7-4721-b245-c63202188d2f nodeName:}" failed. No retries permitted until 2025-12-13 06:44:24.394345464 +0000 UTC m=+898.260940045 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b6da7b5a-20a7-4721-b245-c63202188d2f-metrics-certs") pod "frr-k8s-rm42c" (UID: "b6da7b5a-20a7-4721-b245-c63202188d2f") : secret "frr-k8s-certs-secret" not found Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.898420 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/b6da7b5a-20a7-4721-b245-c63202188d2f-metrics\") pod \"frr-k8s-rm42c\" (UID: \"b6da7b5a-20a7-4721-b245-c63202188d2f\") " pod="metallb-system/frr-k8s-rm42c" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.900017 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/b6da7b5a-20a7-4721-b245-c63202188d2f-frr-startup\") pod \"frr-k8s-rm42c\" (UID: \"b6da7b5a-20a7-4721-b245-c63202188d2f\") " pod="metallb-system/frr-k8s-rm42c" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.900380 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/b6da7b5a-20a7-4721-b245-c63202188d2f-reloader\") pod \"frr-k8s-rm42c\" (UID: \"b6da7b5a-20a7-4721-b245-c63202188d2f\") " pod="metallb-system/frr-k8s-rm42c" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.900918 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/b6da7b5a-20a7-4721-b245-c63202188d2f-frr-conf\") pod \"frr-k8s-rm42c\" (UID: \"b6da7b5a-20a7-4721-b245-c63202188d2f\") " pod="metallb-system/frr-k8s-rm42c" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.914186 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/fb943e2e-85ed-4508-ade6-d16343977d3d-cert\") pod \"frr-k8s-webhook-server-7784b6fcf-cwjgt\" (UID: \"fb943e2e-85ed-4508-ade6-d16343977d3d\") " pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-cwjgt" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.919827 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fmb88\" (UniqueName: \"kubernetes.io/projected/fb943e2e-85ed-4508-ade6-d16343977d3d-kube-api-access-fmb88\") pod \"frr-k8s-webhook-server-7784b6fcf-cwjgt\" (UID: \"fb943e2e-85ed-4508-ade6-d16343977d3d\") " pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-cwjgt" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.937412 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xgs4j\" (UniqueName: \"kubernetes.io/projected/b6da7b5a-20a7-4721-b245-c63202188d2f-kube-api-access-xgs4j\") pod \"frr-k8s-rm42c\" (UID: \"b6da7b5a-20a7-4721-b245-c63202188d2f\") " pod="metallb-system/frr-k8s-rm42c" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.962903 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-cwjgt" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.995085 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dr45c\" (UniqueName: \"kubernetes.io/projected/2adbe773-2087-440f-bcdd-91ccb9eaa03f-kube-api-access-dr45c\") pod \"controller-5bddd4b946-x8wnx\" (UID: \"2adbe773-2087-440f-bcdd-91ccb9eaa03f\") " pod="metallb-system/controller-5bddd4b946-x8wnx" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.995135 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2adbe773-2087-440f-bcdd-91ccb9eaa03f-cert\") pod \"controller-5bddd4b946-x8wnx\" (UID: \"2adbe773-2087-440f-bcdd-91ccb9eaa03f\") " pod="metallb-system/controller-5bddd4b946-x8wnx" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.995157 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/3c80ea24-095d-4c79-9328-2b3433da583c-metallb-excludel2\") pod \"speaker-fj6dj\" (UID: \"3c80ea24-095d-4c79-9328-2b3433da583c\") " pod="metallb-system/speaker-fj6dj" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.995175 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/3c80ea24-095d-4c79-9328-2b3433da583c-memberlist\") pod \"speaker-fj6dj\" (UID: \"3c80ea24-095d-4c79-9328-2b3433da583c\") " pod="metallb-system/speaker-fj6dj" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.995230 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2adbe773-2087-440f-bcdd-91ccb9eaa03f-metrics-certs\") pod \"controller-5bddd4b946-x8wnx\" (UID: \"2adbe773-2087-440f-bcdd-91ccb9eaa03f\") " pod="metallb-system/controller-5bddd4b946-x8wnx" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.995250 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cmb4k\" (UniqueName: \"kubernetes.io/projected/3c80ea24-095d-4c79-9328-2b3433da583c-kube-api-access-cmb4k\") pod \"speaker-fj6dj\" (UID: \"3c80ea24-095d-4c79-9328-2b3433da583c\") " pod="metallb-system/speaker-fj6dj" Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.995269 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3c80ea24-095d-4c79-9328-2b3433da583c-metrics-certs\") pod \"speaker-fj6dj\" (UID: \"3c80ea24-095d-4c79-9328-2b3433da583c\") " pod="metallb-system/speaker-fj6dj" Dec 13 06:44:23 crc kubenswrapper[5048]: E1213 06:44:23.995376 5048 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found Dec 13 06:44:23 crc kubenswrapper[5048]: E1213 06:44:23.995423 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3c80ea24-095d-4c79-9328-2b3433da583c-metrics-certs podName:3c80ea24-095d-4c79-9328-2b3433da583c nodeName:}" failed. No retries permitted until 2025-12-13 06:44:24.495407092 +0000 UTC m=+898.362001663 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3c80ea24-095d-4c79-9328-2b3433da583c-metrics-certs") pod "speaker-fj6dj" (UID: "3c80ea24-095d-4c79-9328-2b3433da583c") : secret "speaker-certs-secret" not found Dec 13 06:44:23 crc kubenswrapper[5048]: E1213 06:44:23.996065 5048 secret.go:188] Couldn't get secret metallb-system/controller-certs-secret: secret "controller-certs-secret" not found Dec 13 06:44:23 crc kubenswrapper[5048]: E1213 06:44:23.996092 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2adbe773-2087-440f-bcdd-91ccb9eaa03f-metrics-certs podName:2adbe773-2087-440f-bcdd-91ccb9eaa03f nodeName:}" failed. No retries permitted until 2025-12-13 06:44:24.49608331 +0000 UTC m=+898.362677891 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2adbe773-2087-440f-bcdd-91ccb9eaa03f-metrics-certs") pod "controller-5bddd4b946-x8wnx" (UID: "2adbe773-2087-440f-bcdd-91ccb9eaa03f") : secret "controller-certs-secret" not found Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.996780 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/3c80ea24-095d-4c79-9328-2b3433da583c-metallb-excludel2\") pod \"speaker-fj6dj\" (UID: \"3c80ea24-095d-4c79-9328-2b3433da583c\") " pod="metallb-system/speaker-fj6dj" Dec 13 06:44:23 crc kubenswrapper[5048]: E1213 06:44:23.996837 5048 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Dec 13 06:44:23 crc kubenswrapper[5048]: E1213 06:44:23.996859 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3c80ea24-095d-4c79-9328-2b3433da583c-memberlist podName:3c80ea24-095d-4c79-9328-2b3433da583c nodeName:}" failed. No retries permitted until 2025-12-13 06:44:24.496851229 +0000 UTC m=+898.363445800 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/3c80ea24-095d-4c79-9328-2b3433da583c-memberlist") pod "speaker-fj6dj" (UID: "3c80ea24-095d-4c79-9328-2b3433da583c") : secret "metallb-memberlist" not found Dec 13 06:44:23 crc kubenswrapper[5048]: I1213 06:44:23.998824 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2adbe773-2087-440f-bcdd-91ccb9eaa03f-cert\") pod \"controller-5bddd4b946-x8wnx\" (UID: \"2adbe773-2087-440f-bcdd-91ccb9eaa03f\") " pod="metallb-system/controller-5bddd4b946-x8wnx" Dec 13 06:44:24 crc kubenswrapper[5048]: I1213 06:44:24.015796 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dr45c\" (UniqueName: \"kubernetes.io/projected/2adbe773-2087-440f-bcdd-91ccb9eaa03f-kube-api-access-dr45c\") pod \"controller-5bddd4b946-x8wnx\" (UID: \"2adbe773-2087-440f-bcdd-91ccb9eaa03f\") " pod="metallb-system/controller-5bddd4b946-x8wnx" Dec 13 06:44:24 crc kubenswrapper[5048]: I1213 06:44:24.016982 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cmb4k\" (UniqueName: \"kubernetes.io/projected/3c80ea24-095d-4c79-9328-2b3433da583c-kube-api-access-cmb4k\") pod \"speaker-fj6dj\" (UID: \"3c80ea24-095d-4c79-9328-2b3433da583c\") " pod="metallb-system/speaker-fj6dj" Dec 13 06:44:24 crc kubenswrapper[5048]: I1213 06:44:24.183194 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lhdr4"] Dec 13 06:44:24 crc kubenswrapper[5048]: I1213 06:44:24.234896 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7784b6fcf-cwjgt"] Dec 13 06:44:24 crc kubenswrapper[5048]: W1213 06:44:24.242537 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfb943e2e_85ed_4508_ade6_d16343977d3d.slice/crio-9de43fbeb6a47212858b6ed346bb8e427aba7ce0a32399fe29ca6082f6a62883 WatchSource:0}: Error finding container 9de43fbeb6a47212858b6ed346bb8e427aba7ce0a32399fe29ca6082f6a62883: Status 404 returned error can't find the container with id 9de43fbeb6a47212858b6ed346bb8e427aba7ce0a32399fe29ca6082f6a62883 Dec 13 06:44:24 crc kubenswrapper[5048]: I1213 06:44:24.400506 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b6da7b5a-20a7-4721-b245-c63202188d2f-metrics-certs\") pod \"frr-k8s-rm42c\" (UID: \"b6da7b5a-20a7-4721-b245-c63202188d2f\") " pod="metallb-system/frr-k8s-rm42c" Dec 13 06:44:24 crc kubenswrapper[5048]: I1213 06:44:24.405372 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b6da7b5a-20a7-4721-b245-c63202188d2f-metrics-certs\") pod \"frr-k8s-rm42c\" (UID: \"b6da7b5a-20a7-4721-b245-c63202188d2f\") " pod="metallb-system/frr-k8s-rm42c" Dec 13 06:44:24 crc kubenswrapper[5048]: I1213 06:44:24.501459 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/3c80ea24-095d-4c79-9328-2b3433da583c-memberlist\") pod \"speaker-fj6dj\" (UID: \"3c80ea24-095d-4c79-9328-2b3433da583c\") " pod="metallb-system/speaker-fj6dj" Dec 13 06:44:24 crc kubenswrapper[5048]: I1213 06:44:24.501654 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: 
\"kubernetes.io/secret/2adbe773-2087-440f-bcdd-91ccb9eaa03f-metrics-certs\") pod \"controller-5bddd4b946-x8wnx\" (UID: \"2adbe773-2087-440f-bcdd-91ccb9eaa03f\") " pod="metallb-system/controller-5bddd4b946-x8wnx" Dec 13 06:44:24 crc kubenswrapper[5048]: E1213 06:44:24.501671 5048 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Dec 13 06:44:24 crc kubenswrapper[5048]: I1213 06:44:24.501696 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3c80ea24-095d-4c79-9328-2b3433da583c-metrics-certs\") pod \"speaker-fj6dj\" (UID: \"3c80ea24-095d-4c79-9328-2b3433da583c\") " pod="metallb-system/speaker-fj6dj" Dec 13 06:44:24 crc kubenswrapper[5048]: E1213 06:44:24.501735 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3c80ea24-095d-4c79-9328-2b3433da583c-memberlist podName:3c80ea24-095d-4c79-9328-2b3433da583c nodeName:}" failed. No retries permitted until 2025-12-13 06:44:25.501717439 +0000 UTC m=+899.368312020 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/3c80ea24-095d-4c79-9328-2b3433da583c-memberlist") pod "speaker-fj6dj" (UID: "3c80ea24-095d-4c79-9328-2b3433da583c") : secret "metallb-memberlist" not found Dec 13 06:44:24 crc kubenswrapper[5048]: I1213 06:44:24.504631 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3c80ea24-095d-4c79-9328-2b3433da583c-metrics-certs\") pod \"speaker-fj6dj\" (UID: \"3c80ea24-095d-4c79-9328-2b3433da583c\") " pod="metallb-system/speaker-fj6dj" Dec 13 06:44:24 crc kubenswrapper[5048]: I1213 06:44:24.506127 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2adbe773-2087-440f-bcdd-91ccb9eaa03f-metrics-certs\") pod \"controller-5bddd4b946-x8wnx\" (UID: \"2adbe773-2087-440f-bcdd-91ccb9eaa03f\") " pod="metallb-system/controller-5bddd4b946-x8wnx" Dec 13 06:44:24 crc kubenswrapper[5048]: I1213 06:44:24.582836 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-rm42c" Dec 13 06:44:24 crc kubenswrapper[5048]: I1213 06:44:24.713517 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-5bddd4b946-x8wnx" Dec 13 06:44:24 crc kubenswrapper[5048]: I1213 06:44:24.921982 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lhdr4" event={"ID":"66a815bb-5ba1-4af1-a3de-45df9ae28539","Type":"ContainerStarted","Data":"3d701f56d933492b3b6a49f99f4fc6460400b1e4f6fd3a9cf4c7821b627455a5"} Dec 13 06:44:24 crc kubenswrapper[5048]: I1213 06:44:24.923277 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-cwjgt" event={"ID":"fb943e2e-85ed-4508-ade6-d16343977d3d","Type":"ContainerStarted","Data":"9de43fbeb6a47212858b6ed346bb8e427aba7ce0a32399fe29ca6082f6a62883"} Dec 13 06:44:25 crc kubenswrapper[5048]: I1213 06:44:25.522300 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/3c80ea24-095d-4c79-9328-2b3433da583c-memberlist\") pod \"speaker-fj6dj\" (UID: \"3c80ea24-095d-4c79-9328-2b3433da583c\") " pod="metallb-system/speaker-fj6dj" Dec 13 06:44:25 crc kubenswrapper[5048]: E1213 06:44:25.522499 5048 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Dec 13 06:44:25 crc kubenswrapper[5048]: E1213 06:44:25.523016 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3c80ea24-095d-4c79-9328-2b3433da583c-memberlist podName:3c80ea24-095d-4c79-9328-2b3433da583c nodeName:}" failed. No retries permitted until 2025-12-13 06:44:27.522964804 +0000 UTC m=+901.389559385 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/3c80ea24-095d-4c79-9328-2b3433da583c-memberlist") pod "speaker-fj6dj" (UID: "3c80ea24-095d-4c79-9328-2b3433da583c") : secret "metallb-memberlist" not found Dec 13 06:44:25 crc kubenswrapper[5048]: I1213 06:44:25.798383 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-5bddd4b946-x8wnx"] Dec 13 06:44:25 crc kubenswrapper[5048]: I1213 06:44:25.930261 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-5bddd4b946-x8wnx" event={"ID":"2adbe773-2087-440f-bcdd-91ccb9eaa03f","Type":"ContainerStarted","Data":"ba4baf0722ba2567e11778fc1d7d1622c930181a4e41c6e2802fed9d5d48b36a"} Dec 13 06:44:25 crc kubenswrapper[5048]: I1213 06:44:25.992184 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-9fx7g" Dec 13 06:44:25 crc kubenswrapper[5048]: I1213 06:44:25.992252 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-9fx7g" Dec 13 06:44:26 crc kubenswrapper[5048]: I1213 06:44:26.045654 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-9fx7g" Dec 13 06:44:26 crc kubenswrapper[5048]: E1213 06:44:26.857862 5048 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod66a815bb_5ba1_4af1_a3de_45df9ae28539.slice/crio-9c6db4db1b02f8d46024ea60e9896a568cd28f7e3adf4dd619f8298cfac379ff.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod66a815bb_5ba1_4af1_a3de_45df9ae28539.slice/crio-conmon-9c6db4db1b02f8d46024ea60e9896a568cd28f7e3adf4dd619f8298cfac379ff.scope\": RecentStats: 
unable to find data in memory cache]" Dec 13 06:44:26 crc kubenswrapper[5048]: I1213 06:44:26.937146 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-5bddd4b946-x8wnx" event={"ID":"2adbe773-2087-440f-bcdd-91ccb9eaa03f","Type":"ContainerStarted","Data":"bc2f0a66d110d10b6ef2fcf8ebc3ecb70780a3159618cfbe5a3d61399581ea20"} Dec 13 06:44:26 crc kubenswrapper[5048]: I1213 06:44:26.938489 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-5bddd4b946-x8wnx" Dec 13 06:44:26 crc kubenswrapper[5048]: I1213 06:44:26.938552 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-5bddd4b946-x8wnx" event={"ID":"2adbe773-2087-440f-bcdd-91ccb9eaa03f","Type":"ContainerStarted","Data":"cbb4acab8fa5a244f0f58238b404f6a93f35395c75fd74142080c4505d389666"} Dec 13 06:44:26 crc kubenswrapper[5048]: I1213 06:44:26.942782 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rm42c" event={"ID":"b6da7b5a-20a7-4721-b245-c63202188d2f","Type":"ContainerStarted","Data":"7d1112288ba09260276a44de15ab4f0f6b4b61b6f9b661245939f01f5eec5596"} Dec 13 06:44:26 crc kubenswrapper[5048]: I1213 06:44:26.945265 5048 generic.go:334] "Generic (PLEG): container finished" podID="66a815bb-5ba1-4af1-a3de-45df9ae28539" containerID="9c6db4db1b02f8d46024ea60e9896a568cd28f7e3adf4dd619f8298cfac379ff" exitCode=0 Dec 13 06:44:26 crc kubenswrapper[5048]: I1213 06:44:26.945328 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lhdr4" event={"ID":"66a815bb-5ba1-4af1-a3de-45df9ae28539","Type":"ContainerDied","Data":"9c6db4db1b02f8d46024ea60e9896a568cd28f7e3adf4dd619f8298cfac379ff"} Dec 13 06:44:26 crc kubenswrapper[5048]: I1213 06:44:26.959749 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-5bddd4b946-x8wnx" podStartSLOduration=3.959724514 podStartE2EDuration="3.959724514s" podCreationTimestamp="2025-12-13 06:44:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:44:26.95876336 +0000 UTC m=+900.825357961" watchObservedRunningTime="2025-12-13 06:44:26.959724514 +0000 UTC m=+900.826319105" Dec 13 06:44:27 crc kubenswrapper[5048]: I1213 06:44:27.018922 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-9fx7g" Dec 13 06:44:27 crc kubenswrapper[5048]: I1213 06:44:27.589454 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/3c80ea24-095d-4c79-9328-2b3433da583c-memberlist\") pod \"speaker-fj6dj\" (UID: \"3c80ea24-095d-4c79-9328-2b3433da583c\") " pod="metallb-system/speaker-fj6dj" Dec 13 06:44:27 crc kubenswrapper[5048]: I1213 06:44:27.595085 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/3c80ea24-095d-4c79-9328-2b3433da583c-memberlist\") pod \"speaker-fj6dj\" (UID: \"3c80ea24-095d-4c79-9328-2b3433da583c\") " pod="metallb-system/speaker-fj6dj" Dec 13 06:44:27 crc kubenswrapper[5048]: I1213 06:44:27.692957 5048 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-smmk7" Dec 13 06:44:27 crc kubenswrapper[5048]: I1213 06:44:27.701724 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-fj6dj" Dec 13 06:44:27 crc kubenswrapper[5048]: W1213 06:44:27.724274 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3c80ea24_095d_4c79_9328_2b3433da583c.slice/crio-301dd380295af70d694cf620160f341766fc8e390acdd5b1d5b726f970765ed1 WatchSource:0}: Error finding container 301dd380295af70d694cf620160f341766fc8e390acdd5b1d5b726f970765ed1: Status 404 returned error can't find the container with id 301dd380295af70d694cf620160f341766fc8e390acdd5b1d5b726f970765ed1 Dec 13 06:44:27 crc kubenswrapper[5048]: I1213 06:44:27.952899 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-fj6dj" event={"ID":"3c80ea24-095d-4c79-9328-2b3433da583c","Type":"ContainerStarted","Data":"301dd380295af70d694cf620160f341766fc8e390acdd5b1d5b726f970765ed1"} Dec 13 06:44:29 crc kubenswrapper[5048]: I1213 06:44:29.297040 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9fx7g"] Dec 13 06:44:29 crc kubenswrapper[5048]: I1213 06:44:29.299868 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-9fx7g" podUID="cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5" containerName="registry-server" containerID="cri-o://ad9b1e7417fe56eb3d4a8e590262853ea0f6b157c17d803f1f912af1bfda5bda" gracePeriod=2 Dec 13 06:44:29 crc kubenswrapper[5048]: I1213 06:44:29.966887 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-fj6dj" event={"ID":"3c80ea24-095d-4c79-9328-2b3433da583c","Type":"ContainerStarted","Data":"5f0badee36a98fcd2cc48cfa80cfbf5d9e69e50c0695bcff5443b0daf8abc890"} Dec 13 06:44:29 crc kubenswrapper[5048]: I1213 06:44:29.967233 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-fj6dj" event={"ID":"3c80ea24-095d-4c79-9328-2b3433da583c","Type":"ContainerStarted","Data":"2be60a9ce3148f6dbbc92755fbd4fac0d0f95c52bec4b08ae69359dc16b9e95d"} Dec 13 06:44:29 crc kubenswrapper[5048]: I1213 06:44:29.968211 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-fj6dj" Dec 13 06:44:29 crc kubenswrapper[5048]: I1213 06:44:29.970866 5048 generic.go:334] "Generic (PLEG): container finished" podID="66a815bb-5ba1-4af1-a3de-45df9ae28539" containerID="ccfdd82419e9912bb5dd17ca5c678e8e9721843bdd450d64dce874b1a692c923" exitCode=0 Dec 13 06:44:29 crc kubenswrapper[5048]: I1213 06:44:29.970917 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lhdr4" event={"ID":"66a815bb-5ba1-4af1-a3de-45df9ae28539","Type":"ContainerDied","Data":"ccfdd82419e9912bb5dd17ca5c678e8e9721843bdd450d64dce874b1a692c923"} Dec 13 06:44:29 crc kubenswrapper[5048]: I1213 06:44:29.978531 5048 generic.go:334] "Generic (PLEG): container finished" podID="cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5" containerID="ad9b1e7417fe56eb3d4a8e590262853ea0f6b157c17d803f1f912af1bfda5bda" exitCode=0 Dec 13 06:44:29 crc kubenswrapper[5048]: I1213 06:44:29.978577 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9fx7g" event={"ID":"cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5","Type":"ContainerDied","Data":"ad9b1e7417fe56eb3d4a8e590262853ea0f6b157c17d803f1f912af1bfda5bda"} Dec 13 06:44:29 crc kubenswrapper[5048]: I1213 06:44:29.994639 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-fj6dj" 
podStartSLOduration=6.994624176 podStartE2EDuration="6.994624176s" podCreationTimestamp="2025-12-13 06:44:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:44:29.990863358 +0000 UTC m=+903.857457949" watchObservedRunningTime="2025-12-13 06:44:29.994624176 +0000 UTC m=+903.861218757" Dec 13 06:44:32 crc kubenswrapper[5048]: I1213 06:44:32.466179 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9fx7g" Dec 13 06:44:32 crc kubenswrapper[5048]: I1213 06:44:32.472178 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f2bzg\" (UniqueName: \"kubernetes.io/projected/cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5-kube-api-access-f2bzg\") pod \"cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5\" (UID: \"cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5\") " Dec 13 06:44:32 crc kubenswrapper[5048]: I1213 06:44:32.472296 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5-utilities\") pod \"cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5\" (UID: \"cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5\") " Dec 13 06:44:32 crc kubenswrapper[5048]: I1213 06:44:32.472387 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5-catalog-content\") pod \"cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5\" (UID: \"cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5\") " Dec 13 06:44:32 crc kubenswrapper[5048]: I1213 06:44:32.473279 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5-utilities" (OuterVolumeSpecName: "utilities") pod "cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5" (UID: "cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:44:32 crc kubenswrapper[5048]: I1213 06:44:32.478209 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5-kube-api-access-f2bzg" (OuterVolumeSpecName: "kube-api-access-f2bzg") pod "cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5" (UID: "cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5"). InnerVolumeSpecName "kube-api-access-f2bzg". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:44:32 crc kubenswrapper[5048]: I1213 06:44:32.525613 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5" (UID: "cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:44:32 crc kubenswrapper[5048]: I1213 06:44:32.574056 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f2bzg\" (UniqueName: \"kubernetes.io/projected/cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5-kube-api-access-f2bzg\") on node \"crc\" DevicePath \"\"" Dec 13 06:44:32 crc kubenswrapper[5048]: I1213 06:44:32.574081 5048 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5-utilities\") on node \"crc\" DevicePath \"\"" Dec 13 06:44:32 crc kubenswrapper[5048]: I1213 06:44:32.574092 5048 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 13 06:44:33 crc kubenswrapper[5048]: I1213 06:44:33.009930 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9fx7g" event={"ID":"cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5","Type":"ContainerDied","Data":"7f2eabfc6578c99acae3428f47f33f736fd37eeb0671c9c1f768573ebbdbf1ca"} Dec 13 06:44:33 crc kubenswrapper[5048]: I1213 06:44:33.010002 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9fx7g" Dec 13 06:44:33 crc kubenswrapper[5048]: I1213 06:44:33.010172 5048 scope.go:117] "RemoveContainer" containerID="ad9b1e7417fe56eb3d4a8e590262853ea0f6b157c17d803f1f912af1bfda5bda" Dec 13 06:44:33 crc kubenswrapper[5048]: I1213 06:44:33.015529 5048 generic.go:334] "Generic (PLEG): container finished" podID="b6da7b5a-20a7-4721-b245-c63202188d2f" containerID="918c4b5f7a41eb48350c0085669ff25f473c7a1fe7a70cd3bd6827c8799d676d" exitCode=0 Dec 13 06:44:33 crc kubenswrapper[5048]: I1213 06:44:33.015579 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rm42c" event={"ID":"b6da7b5a-20a7-4721-b245-c63202188d2f","Type":"ContainerDied","Data":"918c4b5f7a41eb48350c0085669ff25f473c7a1fe7a70cd3bd6827c8799d676d"} Dec 13 06:44:33 crc kubenswrapper[5048]: I1213 06:44:33.019837 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-cwjgt" event={"ID":"fb943e2e-85ed-4508-ade6-d16343977d3d","Type":"ContainerStarted","Data":"8ea92df2b0370d87f35ee96a19a670f4f0d9f0307d4e0b6f7041ca40ef1788c5"} Dec 13 06:44:33 crc kubenswrapper[5048]: I1213 06:44:33.019994 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-cwjgt" Dec 13 06:44:33 crc kubenswrapper[5048]: I1213 06:44:33.041216 5048 scope.go:117] "RemoveContainer" containerID="d92f6dc7fcf9e6b4b165f5a0d908dccaa3fb46aefd28e4347cbea2ab50434712" Dec 13 06:44:33 crc kubenswrapper[5048]: I1213 06:44:33.060221 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9fx7g"] Dec 13 06:44:33 crc kubenswrapper[5048]: I1213 06:44:33.064254 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-9fx7g"] Dec 13 06:44:33 crc kubenswrapper[5048]: I1213 06:44:33.079939 5048 scope.go:117] "RemoveContainer" containerID="426c4e10ca01e7399bd6acbe8944e12ddb6621d979b92830bb47fb8dfc808ab2" Dec 13 06:44:34 crc kubenswrapper[5048]: I1213 06:44:34.026962 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lhdr4" 
event={"ID":"66a815bb-5ba1-4af1-a3de-45df9ae28539","Type":"ContainerStarted","Data":"885e9b87ec2ebb9cc3d3e474dd77b70acbe8d6de99ab0fc0004fc990a7aaea90"} Dec 13 06:44:34 crc kubenswrapper[5048]: I1213 06:44:34.031305 5048 generic.go:334] "Generic (PLEG): container finished" podID="b6da7b5a-20a7-4721-b245-c63202188d2f" containerID="0e88efc2bcf6722a64eb3137ada11c488fff7a54e8c0c95dd064934200625f3b" exitCode=0 Dec 13 06:44:34 crc kubenswrapper[5048]: I1213 06:44:34.031402 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rm42c" event={"ID":"b6da7b5a-20a7-4721-b245-c63202188d2f","Type":"ContainerDied","Data":"0e88efc2bcf6722a64eb3137ada11c488fff7a54e8c0c95dd064934200625f3b"} Dec 13 06:44:34 crc kubenswrapper[5048]: I1213 06:44:34.060681 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-cwjgt" podStartSLOduration=2.769722227 podStartE2EDuration="11.060655798s" podCreationTimestamp="2025-12-13 06:44:23 +0000 UTC" firstStartedPulling="2025-12-13 06:44:24.244152206 +0000 UTC m=+898.110746787" lastFinishedPulling="2025-12-13 06:44:32.535085777 +0000 UTC m=+906.401680358" observedRunningTime="2025-12-13 06:44:33.073711341 +0000 UTC m=+906.940305942" watchObservedRunningTime="2025-12-13 06:44:34.060655798 +0000 UTC m=+907.927250399" Dec 13 06:44:34 crc kubenswrapper[5048]: I1213 06:44:34.066591 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-lhdr4" podStartSLOduration=5.061329602 podStartE2EDuration="11.066569321s" podCreationTimestamp="2025-12-13 06:44:23 +0000 UTC" firstStartedPulling="2025-12-13 06:44:26.949628303 +0000 UTC m=+900.816222884" lastFinishedPulling="2025-12-13 06:44:32.954868032 +0000 UTC m=+906.821462603" observedRunningTime="2025-12-13 06:44:34.05107618 +0000 UTC m=+907.917670761" watchObservedRunningTime="2025-12-13 06:44:34.066569321 +0000 UTC m=+907.933163912" Dec 13 06:44:34 crc kubenswrapper[5048]: I1213 06:44:34.575173 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5" path="/var/lib/kubelet/pods/cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5/volumes" Dec 13 06:44:35 crc kubenswrapper[5048]: I1213 06:44:35.038662 5048 generic.go:334] "Generic (PLEG): container finished" podID="b6da7b5a-20a7-4721-b245-c63202188d2f" containerID="d0c81ae46382d045d15bd5f3503d09d6c159d5bc5b74307646bbe6f40ef48529" exitCode=0 Dec 13 06:44:35 crc kubenswrapper[5048]: I1213 06:44:35.038737 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rm42c" event={"ID":"b6da7b5a-20a7-4721-b245-c63202188d2f","Type":"ContainerDied","Data":"d0c81ae46382d045d15bd5f3503d09d6c159d5bc5b74307646bbe6f40ef48529"} Dec 13 06:44:36 crc kubenswrapper[5048]: I1213 06:44:36.047780 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rm42c" event={"ID":"b6da7b5a-20a7-4721-b245-c63202188d2f","Type":"ContainerStarted","Data":"4ad9949eadf05958c2b509f1647b5e98c0dd559e8ffe9bcffc512266087eb58a"} Dec 13 06:44:36 crc kubenswrapper[5048]: I1213 06:44:36.048919 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rm42c" event={"ID":"b6da7b5a-20a7-4721-b245-c63202188d2f","Type":"ContainerStarted","Data":"973ae3b6bbc81de97706c5d4e2c0369a106df5c44b1b9829cf4060b7a3c76872"} Dec 13 06:44:36 crc kubenswrapper[5048]: I1213 06:44:36.048998 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rm42c" 
event={"ID":"b6da7b5a-20a7-4721-b245-c63202188d2f","Type":"ContainerStarted","Data":"04f3a8bff0b1e7b48cd1fc67c4ed679f7c6bd9c71cf6b81786e70b4f47008f47"} Dec 13 06:44:39 crc kubenswrapper[5048]: I1213 06:44:39.073043 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rm42c" event={"ID":"b6da7b5a-20a7-4721-b245-c63202188d2f","Type":"ContainerStarted","Data":"8d3b4c0b24544b088603ac9227d92048adc7672d3755beba155bfc4d20356882"} Dec 13 06:44:39 crc kubenswrapper[5048]: I1213 06:44:39.073410 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rm42c" event={"ID":"b6da7b5a-20a7-4721-b245-c63202188d2f","Type":"ContainerStarted","Data":"c45fe10f7ccd3e98ad60965a99ea181560ae0f064d7f8db0d17d6f5140c2a9be"} Dec 13 06:44:40 crc kubenswrapper[5048]: I1213 06:44:40.003195 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-hbbb4"] Dec 13 06:44:40 crc kubenswrapper[5048]: E1213 06:44:40.003722 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5" containerName="registry-server" Dec 13 06:44:40 crc kubenswrapper[5048]: I1213 06:44:40.003735 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5" containerName="registry-server" Dec 13 06:44:40 crc kubenswrapper[5048]: E1213 06:44:40.003746 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5" containerName="extract-content" Dec 13 06:44:40 crc kubenswrapper[5048]: I1213 06:44:40.003751 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5" containerName="extract-content" Dec 13 06:44:40 crc kubenswrapper[5048]: E1213 06:44:40.003762 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5" containerName="extract-utilities" Dec 13 06:44:40 crc kubenswrapper[5048]: I1213 06:44:40.003768 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5" containerName="extract-utilities" Dec 13 06:44:40 crc kubenswrapper[5048]: I1213 06:44:40.003879 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb4f7b3d-aeb5-471d-ac15-d4f4815e00e5" containerName="registry-server" Dec 13 06:44:40 crc kubenswrapper[5048]: I1213 06:44:40.004728 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-hbbb4" Dec 13 06:44:40 crc kubenswrapper[5048]: I1213 06:44:40.018621 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hbbb4"] Dec 13 06:44:40 crc kubenswrapper[5048]: I1213 06:44:40.074307 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2slpr\" (UniqueName: \"kubernetes.io/projected/c2b00f79-a155-447b-929d-7fd48f2260da-kube-api-access-2slpr\") pod \"certified-operators-hbbb4\" (UID: \"c2b00f79-a155-447b-929d-7fd48f2260da\") " pod="openshift-marketplace/certified-operators-hbbb4" Dec 13 06:44:40 crc kubenswrapper[5048]: I1213 06:44:40.074359 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2b00f79-a155-447b-929d-7fd48f2260da-catalog-content\") pod \"certified-operators-hbbb4\" (UID: \"c2b00f79-a155-447b-929d-7fd48f2260da\") " pod="openshift-marketplace/certified-operators-hbbb4" Dec 13 06:44:40 crc kubenswrapper[5048]: I1213 06:44:40.074384 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2b00f79-a155-447b-929d-7fd48f2260da-utilities\") pod \"certified-operators-hbbb4\" (UID: \"c2b00f79-a155-447b-929d-7fd48f2260da\") " pod="openshift-marketplace/certified-operators-hbbb4" Dec 13 06:44:40 crc kubenswrapper[5048]: I1213 06:44:40.087110 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rm42c" event={"ID":"b6da7b5a-20a7-4721-b245-c63202188d2f","Type":"ContainerStarted","Data":"8370778e8ffe63ee297a3dd149258f96c8ab8e20456e7e3657d2c703d6139e82"} Dec 13 06:44:40 crc kubenswrapper[5048]: I1213 06:44:40.087312 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-rm42c" Dec 13 06:44:40 crc kubenswrapper[5048]: I1213 06:44:40.112301 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-rm42c" podStartSLOduration=11.155147197 podStartE2EDuration="17.112281809s" podCreationTimestamp="2025-12-13 06:44:23 +0000 UTC" firstStartedPulling="2025-12-13 06:44:26.595040667 +0000 UTC m=+900.461635248" lastFinishedPulling="2025-12-13 06:44:32.552175279 +0000 UTC m=+906.418769860" observedRunningTime="2025-12-13 06:44:40.109808374 +0000 UTC m=+913.976402975" watchObservedRunningTime="2025-12-13 06:44:40.112281809 +0000 UTC m=+913.978876390" Dec 13 06:44:40 crc kubenswrapper[5048]: I1213 06:44:40.175276 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2slpr\" (UniqueName: \"kubernetes.io/projected/c2b00f79-a155-447b-929d-7fd48f2260da-kube-api-access-2slpr\") pod \"certified-operators-hbbb4\" (UID: \"c2b00f79-a155-447b-929d-7fd48f2260da\") " pod="openshift-marketplace/certified-operators-hbbb4" Dec 13 06:44:40 crc kubenswrapper[5048]: I1213 06:44:40.175323 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2b00f79-a155-447b-929d-7fd48f2260da-catalog-content\") pod \"certified-operators-hbbb4\" (UID: \"c2b00f79-a155-447b-929d-7fd48f2260da\") " pod="openshift-marketplace/certified-operators-hbbb4" Dec 13 06:44:40 crc kubenswrapper[5048]: I1213 06:44:40.175355 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" 
(UniqueName: \"kubernetes.io/empty-dir/c2b00f79-a155-447b-929d-7fd48f2260da-utilities\") pod \"certified-operators-hbbb4\" (UID: \"c2b00f79-a155-447b-929d-7fd48f2260da\") " pod="openshift-marketplace/certified-operators-hbbb4" Dec 13 06:44:40 crc kubenswrapper[5048]: I1213 06:44:40.175843 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2b00f79-a155-447b-929d-7fd48f2260da-catalog-content\") pod \"certified-operators-hbbb4\" (UID: \"c2b00f79-a155-447b-929d-7fd48f2260da\") " pod="openshift-marketplace/certified-operators-hbbb4" Dec 13 06:44:40 crc kubenswrapper[5048]: I1213 06:44:40.175882 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2b00f79-a155-447b-929d-7fd48f2260da-utilities\") pod \"certified-operators-hbbb4\" (UID: \"c2b00f79-a155-447b-929d-7fd48f2260da\") " pod="openshift-marketplace/certified-operators-hbbb4" Dec 13 06:44:40 crc kubenswrapper[5048]: I1213 06:44:40.194186 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2slpr\" (UniqueName: \"kubernetes.io/projected/c2b00f79-a155-447b-929d-7fd48f2260da-kube-api-access-2slpr\") pod \"certified-operators-hbbb4\" (UID: \"c2b00f79-a155-447b-929d-7fd48f2260da\") " pod="openshift-marketplace/certified-operators-hbbb4" Dec 13 06:44:40 crc kubenswrapper[5048]: I1213 06:44:40.320711 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hbbb4" Dec 13 06:44:40 crc kubenswrapper[5048]: I1213 06:44:40.541864 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hbbb4"] Dec 13 06:44:40 crc kubenswrapper[5048]: W1213 06:44:40.550646 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc2b00f79_a155_447b_929d_7fd48f2260da.slice/crio-51c2a7253e581adc2611cd4d9f03249793978ff9039af0ca0a89b0b231b22f4a WatchSource:0}: Error finding container 51c2a7253e581adc2611cd4d9f03249793978ff9039af0ca0a89b0b231b22f4a: Status 404 returned error can't find the container with id 51c2a7253e581adc2611cd4d9f03249793978ff9039af0ca0a89b0b231b22f4a Dec 13 06:44:41 crc kubenswrapper[5048]: I1213 06:44:41.096112 5048 generic.go:334] "Generic (PLEG): container finished" podID="c2b00f79-a155-447b-929d-7fd48f2260da" containerID="48c096d9450b238475b598672f6050cc1465553377374cae43cdf91b4de1cc16" exitCode=0 Dec 13 06:44:41 crc kubenswrapper[5048]: I1213 06:44:41.096235 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hbbb4" event={"ID":"c2b00f79-a155-447b-929d-7fd48f2260da","Type":"ContainerDied","Data":"48c096d9450b238475b598672f6050cc1465553377374cae43cdf91b4de1cc16"} Dec 13 06:44:41 crc kubenswrapper[5048]: I1213 06:44:41.096594 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hbbb4" event={"ID":"c2b00f79-a155-447b-929d-7fd48f2260da","Type":"ContainerStarted","Data":"51c2a7253e581adc2611cd4d9f03249793978ff9039af0ca0a89b0b231b22f4a"} Dec 13 06:44:43 crc kubenswrapper[5048]: I1213 06:44:43.637513 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-lhdr4" Dec 13 06:44:43 crc kubenswrapper[5048]: I1213 06:44:43.637833 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/redhat-marketplace-lhdr4" Dec 13 06:44:43 crc kubenswrapper[5048]: I1213 06:44:43.677473 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-lhdr4" Dec 13 06:44:43 crc kubenswrapper[5048]: I1213 06:44:43.968236 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-cwjgt" Dec 13 06:44:44 crc kubenswrapper[5048]: I1213 06:44:44.128503 5048 generic.go:334] "Generic (PLEG): container finished" podID="c2b00f79-a155-447b-929d-7fd48f2260da" containerID="b13f48a937d3787b143401bcb9c46415a1ae7c0d377321e2a2cb3bbc8a792869" exitCode=0 Dec 13 06:44:44 crc kubenswrapper[5048]: I1213 06:44:44.130245 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hbbb4" event={"ID":"c2b00f79-a155-447b-929d-7fd48f2260da","Type":"ContainerDied","Data":"b13f48a937d3787b143401bcb9c46415a1ae7c0d377321e2a2cb3bbc8a792869"} Dec 13 06:44:44 crc kubenswrapper[5048]: I1213 06:44:44.175988 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-lhdr4" Dec 13 06:44:44 crc kubenswrapper[5048]: I1213 06:44:44.583332 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-rm42c" Dec 13 06:44:44 crc kubenswrapper[5048]: I1213 06:44:44.631132 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-rm42c" Dec 13 06:44:44 crc kubenswrapper[5048]: I1213 06:44:44.718312 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-5bddd4b946-x8wnx" Dec 13 06:44:45 crc kubenswrapper[5048]: I1213 06:44:45.136906 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-rm42c" Dec 13 06:44:45 crc kubenswrapper[5048]: I1213 06:44:45.382854 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lhdr4"] Dec 13 06:44:46 crc kubenswrapper[5048]: I1213 06:44:46.142523 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-lhdr4" podUID="66a815bb-5ba1-4af1-a3de-45df9ae28539" containerName="registry-server" containerID="cri-o://885e9b87ec2ebb9cc3d3e474dd77b70acbe8d6de99ab0fc0004fc990a7aaea90" gracePeriod=2 Dec 13 06:44:46 crc kubenswrapper[5048]: I1213 06:44:46.143344 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hbbb4" event={"ID":"c2b00f79-a155-447b-929d-7fd48f2260da","Type":"ContainerStarted","Data":"5de3ca957f3a92f98238b78974b65985ceab26514b7baf06686779b006e67006"} Dec 13 06:44:46 crc kubenswrapper[5048]: I1213 06:44:46.172363 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-hbbb4" podStartSLOduration=2.374014604 podStartE2EDuration="7.172341448s" podCreationTimestamp="2025-12-13 06:44:39 +0000 UTC" firstStartedPulling="2025-12-13 06:44:41.100034166 +0000 UTC m=+914.966628757" lastFinishedPulling="2025-12-13 06:44:45.89836102 +0000 UTC m=+919.764955601" observedRunningTime="2025-12-13 06:44:46.166044735 +0000 UTC m=+920.032639336" watchObservedRunningTime="2025-12-13 06:44:46.172341448 +0000 UTC m=+920.038936039" Dec 13 06:44:46 crc kubenswrapper[5048]: I1213 06:44:46.503854 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lhdr4"
Dec 13 06:44:46 crc kubenswrapper[5048]: I1213 06:44:46.665102 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66a815bb-5ba1-4af1-a3de-45df9ae28539-utilities\") pod \"66a815bb-5ba1-4af1-a3de-45df9ae28539\" (UID: \"66a815bb-5ba1-4af1-a3de-45df9ae28539\") "
Dec 13 06:44:46 crc kubenswrapper[5048]: I1213 06:44:46.665165 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66a815bb-5ba1-4af1-a3de-45df9ae28539-catalog-content\") pod \"66a815bb-5ba1-4af1-a3de-45df9ae28539\" (UID: \"66a815bb-5ba1-4af1-a3de-45df9ae28539\") "
Dec 13 06:44:46 crc kubenswrapper[5048]: I1213 06:44:46.665286 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dkl77\" (UniqueName: \"kubernetes.io/projected/66a815bb-5ba1-4af1-a3de-45df9ae28539-kube-api-access-dkl77\") pod \"66a815bb-5ba1-4af1-a3de-45df9ae28539\" (UID: \"66a815bb-5ba1-4af1-a3de-45df9ae28539\") "
Dec 13 06:44:46 crc kubenswrapper[5048]: I1213 06:44:46.665930 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/66a815bb-5ba1-4af1-a3de-45df9ae28539-utilities" (OuterVolumeSpecName: "utilities") pod "66a815bb-5ba1-4af1-a3de-45df9ae28539" (UID: "66a815bb-5ba1-4af1-a3de-45df9ae28539"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 13 06:44:46 crc kubenswrapper[5048]: I1213 06:44:46.675591 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66a815bb-5ba1-4af1-a3de-45df9ae28539-kube-api-access-dkl77" (OuterVolumeSpecName: "kube-api-access-dkl77") pod "66a815bb-5ba1-4af1-a3de-45df9ae28539" (UID: "66a815bb-5ba1-4af1-a3de-45df9ae28539"). InnerVolumeSpecName "kube-api-access-dkl77". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:44:46 crc kubenswrapper[5048]: I1213 06:44:46.694587 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/66a815bb-5ba1-4af1-a3de-45df9ae28539-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "66a815bb-5ba1-4af1-a3de-45df9ae28539" (UID: "66a815bb-5ba1-4af1-a3de-45df9ae28539"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 13 06:44:46 crc kubenswrapper[5048]: I1213 06:44:46.766851 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dkl77\" (UniqueName: \"kubernetes.io/projected/66a815bb-5ba1-4af1-a3de-45df9ae28539-kube-api-access-dkl77\") on node \"crc\" DevicePath \"\""
Dec 13 06:44:46 crc kubenswrapper[5048]: I1213 06:44:46.766892 5048 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66a815bb-5ba1-4af1-a3de-45df9ae28539-utilities\") on node \"crc\" DevicePath \"\""
Dec 13 06:44:46 crc kubenswrapper[5048]: I1213 06:44:46.766905 5048 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66a815bb-5ba1-4af1-a3de-45df9ae28539-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 13 06:44:47 crc kubenswrapper[5048]: I1213 06:44:47.157538 5048 generic.go:334] "Generic (PLEG): container finished" podID="66a815bb-5ba1-4af1-a3de-45df9ae28539" containerID="885e9b87ec2ebb9cc3d3e474dd77b70acbe8d6de99ab0fc0004fc990a7aaea90" exitCode=0
Dec 13 06:44:47 crc kubenswrapper[5048]: I1213 06:44:47.157825 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lhdr4"
Dec 13 06:44:47 crc kubenswrapper[5048]: I1213 06:44:47.157878 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lhdr4" event={"ID":"66a815bb-5ba1-4af1-a3de-45df9ae28539","Type":"ContainerDied","Data":"885e9b87ec2ebb9cc3d3e474dd77b70acbe8d6de99ab0fc0004fc990a7aaea90"}
Dec 13 06:44:47 crc kubenswrapper[5048]: I1213 06:44:47.157916 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lhdr4" event={"ID":"66a815bb-5ba1-4af1-a3de-45df9ae28539","Type":"ContainerDied","Data":"3d701f56d933492b3b6a49f99f4fc6460400b1e4f6fd3a9cf4c7821b627455a5"}
Dec 13 06:44:47 crc kubenswrapper[5048]: I1213 06:44:47.157938 5048 scope.go:117] "RemoveContainer" containerID="885e9b87ec2ebb9cc3d3e474dd77b70acbe8d6de99ab0fc0004fc990a7aaea90"
Dec 13 06:44:47 crc kubenswrapper[5048]: I1213 06:44:47.181388 5048 scope.go:117] "RemoveContainer" containerID="ccfdd82419e9912bb5dd17ca5c678e8e9721843bdd450d64dce874b1a692c923"
Dec 13 06:44:47 crc kubenswrapper[5048]: I1213 06:44:47.208528 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lhdr4"]
Dec 13 06:44:47 crc kubenswrapper[5048]: I1213 06:44:47.213604 5048 scope.go:117] "RemoveContainer" containerID="9c6db4db1b02f8d46024ea60e9896a568cd28f7e3adf4dd619f8298cfac379ff"
Dec 13 06:44:47 crc kubenswrapper[5048]: I1213 06:44:47.213658 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-lhdr4"]
Dec 13 06:44:47 crc kubenswrapper[5048]: I1213 06:44:47.230820 5048 scope.go:117] "RemoveContainer" containerID="885e9b87ec2ebb9cc3d3e474dd77b70acbe8d6de99ab0fc0004fc990a7aaea90"
Dec 13 06:44:47 crc kubenswrapper[5048]: E1213 06:44:47.236287 5048 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod66a815bb_5ba1_4af1_a3de_45df9ae28539.slice\": RecentStats: unable to find data in memory cache]"
Dec 13 06:44:47 crc kubenswrapper[5048]: E1213 06:44:47.240889 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"885e9b87ec2ebb9cc3d3e474dd77b70acbe8d6de99ab0fc0004fc990a7aaea90\": container with ID starting with 885e9b87ec2ebb9cc3d3e474dd77b70acbe8d6de99ab0fc0004fc990a7aaea90 not found: ID does not exist" containerID="885e9b87ec2ebb9cc3d3e474dd77b70acbe8d6de99ab0fc0004fc990a7aaea90"
Dec 13 06:44:47 crc kubenswrapper[5048]: I1213 06:44:47.240942 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"885e9b87ec2ebb9cc3d3e474dd77b70acbe8d6de99ab0fc0004fc990a7aaea90"} err="failed to get container status \"885e9b87ec2ebb9cc3d3e474dd77b70acbe8d6de99ab0fc0004fc990a7aaea90\": rpc error: code = NotFound desc = could not find container \"885e9b87ec2ebb9cc3d3e474dd77b70acbe8d6de99ab0fc0004fc990a7aaea90\": container with ID starting with 885e9b87ec2ebb9cc3d3e474dd77b70acbe8d6de99ab0fc0004fc990a7aaea90 not found: ID does not exist"
Dec 13 06:44:47 crc kubenswrapper[5048]: I1213 06:44:47.240972 5048 scope.go:117] "RemoveContainer" containerID="ccfdd82419e9912bb5dd17ca5c678e8e9721843bdd450d64dce874b1a692c923"
Dec 13 06:44:47 crc kubenswrapper[5048]: E1213 06:44:47.241471 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ccfdd82419e9912bb5dd17ca5c678e8e9721843bdd450d64dce874b1a692c923\": container with ID starting with ccfdd82419e9912bb5dd17ca5c678e8e9721843bdd450d64dce874b1a692c923 not found: ID does not exist" containerID="ccfdd82419e9912bb5dd17ca5c678e8e9721843bdd450d64dce874b1a692c923"
Dec 13 06:44:47 crc kubenswrapper[5048]: I1213 06:44:47.241501 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ccfdd82419e9912bb5dd17ca5c678e8e9721843bdd450d64dce874b1a692c923"} err="failed to get container status \"ccfdd82419e9912bb5dd17ca5c678e8e9721843bdd450d64dce874b1a692c923\": rpc error: code = NotFound desc = could not find container \"ccfdd82419e9912bb5dd17ca5c678e8e9721843bdd450d64dce874b1a692c923\": container with ID starting with ccfdd82419e9912bb5dd17ca5c678e8e9721843bdd450d64dce874b1a692c923 not found: ID does not exist"
Dec 13 06:44:47 crc kubenswrapper[5048]: I1213 06:44:47.241521 5048 scope.go:117] "RemoveContainer" containerID="9c6db4db1b02f8d46024ea60e9896a568cd28f7e3adf4dd619f8298cfac379ff"
Dec 13 06:44:47 crc kubenswrapper[5048]: E1213 06:44:47.241957 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9c6db4db1b02f8d46024ea60e9896a568cd28f7e3adf4dd619f8298cfac379ff\": container with ID starting with 9c6db4db1b02f8d46024ea60e9896a568cd28f7e3adf4dd619f8298cfac379ff not found: ID does not exist" containerID="9c6db4db1b02f8d46024ea60e9896a568cd28f7e3adf4dd619f8298cfac379ff"
Dec 13 06:44:47 crc kubenswrapper[5048]: I1213 06:44:47.241986 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9c6db4db1b02f8d46024ea60e9896a568cd28f7e3adf4dd619f8298cfac379ff"} err="failed to get container status \"9c6db4db1b02f8d46024ea60e9896a568cd28f7e3adf4dd619f8298cfac379ff\": rpc error: code = NotFound desc = could not find container \"9c6db4db1b02f8d46024ea60e9896a568cd28f7e3adf4dd619f8298cfac379ff\": container with ID starting with 9c6db4db1b02f8d46024ea60e9896a568cd28f7e3adf4dd619f8298cfac379ff not found: ID does not exist"
Dec 13 06:44:47 crc kubenswrapper[5048]: I1213 06:44:47.707920 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-fj6dj"
Dec 13 06:44:48 crc kubenswrapper[5048]: I1213 06:44:48.573567 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="66a815bb-5ba1-4af1-a3de-45df9ae28539" path="/var/lib/kubelet/pods/66a815bb-5ba1-4af1-a3de-45df9ae28539/volumes"
Dec 13 06:44:50 crc kubenswrapper[5048]: I1213 06:44:50.321364 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-hbbb4"
Dec 13 06:44:50 crc kubenswrapper[5048]: I1213 06:44:50.321635 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-hbbb4"
Dec 13 06:44:50 crc kubenswrapper[5048]: I1213 06:44:50.366577 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-hbbb4"
Dec 13 06:44:51 crc kubenswrapper[5048]: I1213 06:44:51.221513 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-hbbb4"
Dec 13 06:44:53 crc kubenswrapper[5048]: I1213 06:44:53.781782 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hbbb4"]
Dec 13 06:44:53 crc kubenswrapper[5048]: I1213 06:44:53.782327 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-hbbb4" podUID="c2b00f79-a155-447b-929d-7fd48f2260da" containerName="registry-server" containerID="cri-o://5de3ca957f3a92f98238b78974b65985ceab26514b7baf06686779b006e67006" gracePeriod=2
Dec 13 06:44:54 crc kubenswrapper[5048]: I1213 06:44:54.191479 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-qlml5"]
Dec 13 06:44:54 crc kubenswrapper[5048]: E1213 06:44:54.191741 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66a815bb-5ba1-4af1-a3de-45df9ae28539" containerName="registry-server"
Dec 13 06:44:54 crc kubenswrapper[5048]: I1213 06:44:54.191757 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="66a815bb-5ba1-4af1-a3de-45df9ae28539" containerName="registry-server"
Dec 13 06:44:54 crc kubenswrapper[5048]: E1213 06:44:54.191786 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66a815bb-5ba1-4af1-a3de-45df9ae28539" containerName="extract-utilities"
Dec 13 06:44:54 crc kubenswrapper[5048]: I1213 06:44:54.191800 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="66a815bb-5ba1-4af1-a3de-45df9ae28539" containerName="extract-utilities"
Dec 13 06:44:54 crc kubenswrapper[5048]: E1213 06:44:54.191814 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66a815bb-5ba1-4af1-a3de-45df9ae28539" containerName="extract-content"
Dec 13 06:44:54 crc kubenswrapper[5048]: I1213 06:44:54.191821 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="66a815bb-5ba1-4af1-a3de-45df9ae28539" containerName="extract-content"
Dec 13 06:44:54 crc kubenswrapper[5048]: I1213 06:44:54.191960 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="66a815bb-5ba1-4af1-a3de-45df9ae28539" containerName="registry-server"
Dec 13 06:44:54 crc kubenswrapper[5048]: I1213 06:44:54.192382 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-qlml5"
Dec 13 06:44:54 crc kubenswrapper[5048]: I1213 06:44:54.195584 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-jgzjv"
Dec 13 06:44:54 crc kubenswrapper[5048]: I1213 06:44:54.195584 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt"
Dec 13 06:44:54 crc kubenswrapper[5048]: I1213 06:44:54.196566 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt"
Dec 13 06:44:54 crc kubenswrapper[5048]: I1213 06:44:54.200015 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-qlml5"]
Dec 13 06:44:54 crc kubenswrapper[5048]: I1213 06:44:54.310491 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bh7lb\" (UniqueName: \"kubernetes.io/projected/60f70325-e4e1-4fdc-ba21-c92b6ed5967e-kube-api-access-bh7lb\") pod \"openstack-operator-index-qlml5\" (UID: \"60f70325-e4e1-4fdc-ba21-c92b6ed5967e\") " pod="openstack-operators/openstack-operator-index-qlml5"
Dec 13 06:44:54 crc kubenswrapper[5048]: I1213 06:44:54.411830 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bh7lb\" (UniqueName: \"kubernetes.io/projected/60f70325-e4e1-4fdc-ba21-c92b6ed5967e-kube-api-access-bh7lb\") pod \"openstack-operator-index-qlml5\" (UID: \"60f70325-e4e1-4fdc-ba21-c92b6ed5967e\") " pod="openstack-operators/openstack-operator-index-qlml5"
Dec 13 06:44:54 crc kubenswrapper[5048]: I1213 06:44:54.429957 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bh7lb\" (UniqueName: \"kubernetes.io/projected/60f70325-e4e1-4fdc-ba21-c92b6ed5967e-kube-api-access-bh7lb\") pod \"openstack-operator-index-qlml5\" (UID: \"60f70325-e4e1-4fdc-ba21-c92b6ed5967e\") " pod="openstack-operators/openstack-operator-index-qlml5"
Dec 13 06:44:54 crc kubenswrapper[5048]: I1213 06:44:54.511052 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-qlml5"
Dec 13 06:44:54 crc kubenswrapper[5048]: I1213 06:44:54.945156 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-qlml5"]
Dec 13 06:44:54 crc kubenswrapper[5048]: W1213 06:44:54.952907 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod60f70325_e4e1_4fdc_ba21_c92b6ed5967e.slice/crio-3131a82ca5132f180f401a3675e0dd9db45480fdbee79f556bb459d4f60ed690 WatchSource:0}: Error finding container 3131a82ca5132f180f401a3675e0dd9db45480fdbee79f556bb459d4f60ed690: Status 404 returned error can't find the container with id 3131a82ca5132f180f401a3675e0dd9db45480fdbee79f556bb459d4f60ed690
Dec 13 06:44:55 crc kubenswrapper[5048]: I1213 06:44:55.229897 5048 generic.go:334] "Generic (PLEG): container finished" podID="c2b00f79-a155-447b-929d-7fd48f2260da" containerID="5de3ca957f3a92f98238b78974b65985ceab26514b7baf06686779b006e67006" exitCode=0
Dec 13 06:44:55 crc kubenswrapper[5048]: I1213 06:44:55.230017 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hbbb4" event={"ID":"c2b00f79-a155-447b-929d-7fd48f2260da","Type":"ContainerDied","Data":"5de3ca957f3a92f98238b78974b65985ceab26514b7baf06686779b006e67006"}
Dec 13 06:44:55 crc kubenswrapper[5048]: I1213 06:44:55.236231 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-qlml5" event={"ID":"60f70325-e4e1-4fdc-ba21-c92b6ed5967e","Type":"ContainerStarted","Data":"3131a82ca5132f180f401a3675e0dd9db45480fdbee79f556bb459d4f60ed690"}
Dec 13 06:44:55 crc kubenswrapper[5048]: I1213 06:44:55.361638 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hbbb4"
Dec 13 06:44:55 crc kubenswrapper[5048]: I1213 06:44:55.423243 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2b00f79-a155-447b-929d-7fd48f2260da-catalog-content\") pod \"c2b00f79-a155-447b-929d-7fd48f2260da\" (UID: \"c2b00f79-a155-447b-929d-7fd48f2260da\") "
Dec 13 06:44:55 crc kubenswrapper[5048]: I1213 06:44:55.423395 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2slpr\" (UniqueName: \"kubernetes.io/projected/c2b00f79-a155-447b-929d-7fd48f2260da-kube-api-access-2slpr\") pod \"c2b00f79-a155-447b-929d-7fd48f2260da\" (UID: \"c2b00f79-a155-447b-929d-7fd48f2260da\") "
Dec 13 06:44:55 crc kubenswrapper[5048]: I1213 06:44:55.423422 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2b00f79-a155-447b-929d-7fd48f2260da-utilities\") pod \"c2b00f79-a155-447b-929d-7fd48f2260da\" (UID: \"c2b00f79-a155-447b-929d-7fd48f2260da\") "
Dec 13 06:44:55 crc kubenswrapper[5048]: I1213 06:44:55.424354 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c2b00f79-a155-447b-929d-7fd48f2260da-utilities" (OuterVolumeSpecName: "utilities") pod "c2b00f79-a155-447b-929d-7fd48f2260da" (UID: "c2b00f79-a155-447b-929d-7fd48f2260da"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 13 06:44:55 crc kubenswrapper[5048]: I1213 06:44:55.436676 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2b00f79-a155-447b-929d-7fd48f2260da-kube-api-access-2slpr" (OuterVolumeSpecName: "kube-api-access-2slpr") pod "c2b00f79-a155-447b-929d-7fd48f2260da" (UID: "c2b00f79-a155-447b-929d-7fd48f2260da"). InnerVolumeSpecName "kube-api-access-2slpr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:44:55 crc kubenswrapper[5048]: I1213 06:44:55.473500 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c2b00f79-a155-447b-929d-7fd48f2260da-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c2b00f79-a155-447b-929d-7fd48f2260da" (UID: "c2b00f79-a155-447b-929d-7fd48f2260da"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 13 06:44:55 crc kubenswrapper[5048]: I1213 06:44:55.524853 5048 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2b00f79-a155-447b-929d-7fd48f2260da-utilities\") on node \"crc\" DevicePath \"\""
Dec 13 06:44:55 crc kubenswrapper[5048]: I1213 06:44:55.524883 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2slpr\" (UniqueName: \"kubernetes.io/projected/c2b00f79-a155-447b-929d-7fd48f2260da-kube-api-access-2slpr\") on node \"crc\" DevicePath \"\""
Dec 13 06:44:55 crc kubenswrapper[5048]: I1213 06:44:55.524893 5048 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2b00f79-a155-447b-929d-7fd48f2260da-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 13 06:44:56 crc kubenswrapper[5048]: I1213 06:44:56.247923 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hbbb4" event={"ID":"c2b00f79-a155-447b-929d-7fd48f2260da","Type":"ContainerDied","Data":"51c2a7253e581adc2611cd4d9f03249793978ff9039af0ca0a89b0b231b22f4a"}
Dec 13 06:44:56 crc kubenswrapper[5048]: I1213 06:44:56.248298 5048 scope.go:117] "RemoveContainer" containerID="5de3ca957f3a92f98238b78974b65985ceab26514b7baf06686779b006e67006"
Dec 13 06:44:56 crc kubenswrapper[5048]: I1213 06:44:56.248170 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hbbb4"
Dec 13 06:44:56 crc kubenswrapper[5048]: I1213 06:44:56.286999 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hbbb4"]
Dec 13 06:44:56 crc kubenswrapper[5048]: I1213 06:44:56.291111 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-hbbb4"]
Dec 13 06:44:56 crc kubenswrapper[5048]: I1213 06:44:56.358760 5048 scope.go:117] "RemoveContainer" containerID="b13f48a937d3787b143401bcb9c46415a1ae7c0d377321e2a2cb3bbc8a792869"
Dec 13 06:44:56 crc kubenswrapper[5048]: I1213 06:44:56.581235 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c2b00f79-a155-447b-929d-7fd48f2260da" path="/var/lib/kubelet/pods/c2b00f79-a155-447b-929d-7fd48f2260da/volumes"
Dec 13 06:44:56 crc kubenswrapper[5048]: I1213 06:44:56.863079 5048 scope.go:117] "RemoveContainer" containerID="48c096d9450b238475b598672f6050cc1465553377374cae43cdf91b4de1cc16"
Dec 13 06:44:58 crc kubenswrapper[5048]: I1213 06:44:58.262497 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-qlml5" event={"ID":"60f70325-e4e1-4fdc-ba21-c92b6ed5967e","Type":"ContainerStarted","Data":"add779a4ace1ec06410ed535d5475dd7bae39ed15a6fbd5f713f5d2a08baf1ec"}
Dec 13 06:44:58 crc kubenswrapper[5048]: I1213 06:44:58.279288 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-qlml5" podStartSLOduration=1.736161014 podStartE2EDuration="4.279268814s" podCreationTimestamp="2025-12-13 06:44:54 +0000 UTC" firstStartedPulling="2025-12-13 06:44:54.955155621 +0000 UTC m=+928.821750202" lastFinishedPulling="2025-12-13 06:44:57.498263421 +0000 UTC m=+931.364858002" observedRunningTime="2025-12-13 06:44:58.274252334 +0000 UTC m=+932.140846925" watchObservedRunningTime="2025-12-13 06:44:58.279268814 +0000 UTC m=+932.145863405"
Dec 13 06:45:00 crc kubenswrapper[5048]: I1213 06:45:00.143194 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29426805-cqfrx"]
Dec 13 06:45:00 crc kubenswrapper[5048]: E1213 06:45:00.143807 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2b00f79-a155-447b-929d-7fd48f2260da" containerName="extract-content"
Dec 13 06:45:00 crc kubenswrapper[5048]: I1213 06:45:00.143820 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2b00f79-a155-447b-929d-7fd48f2260da" containerName="extract-content"
Dec 13 06:45:00 crc kubenswrapper[5048]: E1213 06:45:00.143834 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2b00f79-a155-447b-929d-7fd48f2260da" containerName="registry-server"
Dec 13 06:45:00 crc kubenswrapper[5048]: I1213 06:45:00.143840 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2b00f79-a155-447b-929d-7fd48f2260da" containerName="registry-server"
Dec 13 06:45:00 crc kubenswrapper[5048]: E1213 06:45:00.143852 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2b00f79-a155-447b-929d-7fd48f2260da" containerName="extract-utilities"
Dec 13 06:45:00 crc kubenswrapper[5048]: I1213 06:45:00.143858 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2b00f79-a155-447b-929d-7fd48f2260da" containerName="extract-utilities"
Dec 13 06:45:00 crc kubenswrapper[5048]: I1213 06:45:00.143964 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2b00f79-a155-447b-929d-7fd48f2260da" containerName="registry-server"
Dec 13 06:45:00 crc kubenswrapper[5048]: I1213 06:45:00.144472 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29426805-cqfrx"
Dec 13 06:45:00 crc kubenswrapper[5048]: I1213 06:45:00.147224 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Dec 13 06:45:00 crc kubenswrapper[5048]: I1213 06:45:00.147246 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Dec 13 06:45:00 crc kubenswrapper[5048]: I1213 06:45:00.162284 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29426805-cqfrx"]
Dec 13 06:45:00 crc kubenswrapper[5048]: I1213 06:45:00.197855 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e6496a13-bbc2-4049-b82b-3a717d832fc7-secret-volume\") pod \"collect-profiles-29426805-cqfrx\" (UID: \"e6496a13-bbc2-4049-b82b-3a717d832fc7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426805-cqfrx"
Dec 13 06:45:00 crc kubenswrapper[5048]: I1213 06:45:00.197899 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e6496a13-bbc2-4049-b82b-3a717d832fc7-config-volume\") pod \"collect-profiles-29426805-cqfrx\" (UID: \"e6496a13-bbc2-4049-b82b-3a717d832fc7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426805-cqfrx"
Dec 13 06:45:00 crc kubenswrapper[5048]: I1213 06:45:00.197956 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9hlcj\" (UniqueName: \"kubernetes.io/projected/e6496a13-bbc2-4049-b82b-3a717d832fc7-kube-api-access-9hlcj\") pod \"collect-profiles-29426805-cqfrx\" (UID: \"e6496a13-bbc2-4049-b82b-3a717d832fc7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426805-cqfrx"
Dec 13 06:45:00 crc kubenswrapper[5048]: I1213 06:45:00.299238 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e6496a13-bbc2-4049-b82b-3a717d832fc7-secret-volume\") pod \"collect-profiles-29426805-cqfrx\" (UID: \"e6496a13-bbc2-4049-b82b-3a717d832fc7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426805-cqfrx"
Dec 13 06:45:00 crc kubenswrapper[5048]: I1213 06:45:00.299280 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e6496a13-bbc2-4049-b82b-3a717d832fc7-config-volume\") pod \"collect-profiles-29426805-cqfrx\" (UID: \"e6496a13-bbc2-4049-b82b-3a717d832fc7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426805-cqfrx"
Dec 13 06:45:00 crc kubenswrapper[5048]: I1213 06:45:00.299336 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9hlcj\" (UniqueName: \"kubernetes.io/projected/e6496a13-bbc2-4049-b82b-3a717d832fc7-kube-api-access-9hlcj\") pod \"collect-profiles-29426805-cqfrx\" (UID: \"e6496a13-bbc2-4049-b82b-3a717d832fc7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426805-cqfrx"
Dec 13 06:45:00 crc kubenswrapper[5048]: I1213 06:45:00.300400 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e6496a13-bbc2-4049-b82b-3a717d832fc7-config-volume\") pod \"collect-profiles-29426805-cqfrx\" (UID: \"e6496a13-bbc2-4049-b82b-3a717d832fc7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426805-cqfrx"
Dec 13 06:45:00 crc kubenswrapper[5048]: I1213 06:45:00.310340 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e6496a13-bbc2-4049-b82b-3a717d832fc7-secret-volume\") pod \"collect-profiles-29426805-cqfrx\" (UID: \"e6496a13-bbc2-4049-b82b-3a717d832fc7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426805-cqfrx"
Dec 13 06:45:00 crc kubenswrapper[5048]: I1213 06:45:00.328565 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9hlcj\" (UniqueName: \"kubernetes.io/projected/e6496a13-bbc2-4049-b82b-3a717d832fc7-kube-api-access-9hlcj\") pod \"collect-profiles-29426805-cqfrx\" (UID: \"e6496a13-bbc2-4049-b82b-3a717d832fc7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426805-cqfrx"
Dec 13 06:45:00 crc kubenswrapper[5048]: I1213 06:45:00.459942 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29426805-cqfrx"
Dec 13 06:45:00 crc kubenswrapper[5048]: I1213 06:45:00.738924 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29426805-cqfrx"]
Dec 13 06:45:00 crc kubenswrapper[5048]: W1213 06:45:00.755932 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode6496a13_bbc2_4049_b82b_3a717d832fc7.slice/crio-f8ded9d2b7ca30a1da67fe16b17799446b3096f9af1ef369505af264aeff8dea WatchSource:0}: Error finding container f8ded9d2b7ca30a1da67fe16b17799446b3096f9af1ef369505af264aeff8dea: Status 404 returned error can't find the container with id f8ded9d2b7ca30a1da67fe16b17799446b3096f9af1ef369505af264aeff8dea
Dec 13 06:45:01 crc kubenswrapper[5048]: I1213 06:45:01.293168 5048 generic.go:334] "Generic (PLEG): container finished" podID="e6496a13-bbc2-4049-b82b-3a717d832fc7" containerID="6740dc6014628f40d21daa469d719043dea8fa64585837e235a1362d71ce843b" exitCode=0
Dec 13 06:45:01 crc kubenswrapper[5048]: I1213 06:45:01.293222 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29426805-cqfrx" event={"ID":"e6496a13-bbc2-4049-b82b-3a717d832fc7","Type":"ContainerDied","Data":"6740dc6014628f40d21daa469d719043dea8fa64585837e235a1362d71ce843b"}
Dec 13 06:45:01 crc kubenswrapper[5048]: I1213 06:45:01.293539 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29426805-cqfrx" event={"ID":"e6496a13-bbc2-4049-b82b-3a717d832fc7","Type":"ContainerStarted","Data":"f8ded9d2b7ca30a1da67fe16b17799446b3096f9af1ef369505af264aeff8dea"}
Dec 13 06:45:02 crc kubenswrapper[5048]: I1213 06:45:02.605346 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29426805-cqfrx"
Dec 13 06:45:02 crc kubenswrapper[5048]: I1213 06:45:02.730742 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9hlcj\" (UniqueName: \"kubernetes.io/projected/e6496a13-bbc2-4049-b82b-3a717d832fc7-kube-api-access-9hlcj\") pod \"e6496a13-bbc2-4049-b82b-3a717d832fc7\" (UID: \"e6496a13-bbc2-4049-b82b-3a717d832fc7\") "
Dec 13 06:45:02 crc kubenswrapper[5048]: I1213 06:45:02.730880 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e6496a13-bbc2-4049-b82b-3a717d832fc7-secret-volume\") pod \"e6496a13-bbc2-4049-b82b-3a717d832fc7\" (UID: \"e6496a13-bbc2-4049-b82b-3a717d832fc7\") "
Dec 13 06:45:02 crc kubenswrapper[5048]: I1213 06:45:02.730911 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e6496a13-bbc2-4049-b82b-3a717d832fc7-config-volume\") pod \"e6496a13-bbc2-4049-b82b-3a717d832fc7\" (UID: \"e6496a13-bbc2-4049-b82b-3a717d832fc7\") "
Dec 13 06:45:02 crc kubenswrapper[5048]: I1213 06:45:02.731722 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e6496a13-bbc2-4049-b82b-3a717d832fc7-config-volume" (OuterVolumeSpecName: "config-volume") pod "e6496a13-bbc2-4049-b82b-3a717d832fc7" (UID: "e6496a13-bbc2-4049-b82b-3a717d832fc7"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 13 06:45:02 crc kubenswrapper[5048]: I1213 06:45:02.735572 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6496a13-bbc2-4049-b82b-3a717d832fc7-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "e6496a13-bbc2-4049-b82b-3a717d832fc7" (UID: "e6496a13-bbc2-4049-b82b-3a717d832fc7"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:45:02 crc kubenswrapper[5048]: I1213 06:45:02.735613 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e6496a13-bbc2-4049-b82b-3a717d832fc7-kube-api-access-9hlcj" (OuterVolumeSpecName: "kube-api-access-9hlcj") pod "e6496a13-bbc2-4049-b82b-3a717d832fc7" (UID: "e6496a13-bbc2-4049-b82b-3a717d832fc7"). InnerVolumeSpecName "kube-api-access-9hlcj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:45:02 crc kubenswrapper[5048]: I1213 06:45:02.832906 5048 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e6496a13-bbc2-4049-b82b-3a717d832fc7-secret-volume\") on node \"crc\" DevicePath \"\""
Dec 13 06:45:02 crc kubenswrapper[5048]: I1213 06:45:02.832946 5048 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e6496a13-bbc2-4049-b82b-3a717d832fc7-config-volume\") on node \"crc\" DevicePath \"\""
Dec 13 06:45:02 crc kubenswrapper[5048]: I1213 06:45:02.832959 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9hlcj\" (UniqueName: \"kubernetes.io/projected/e6496a13-bbc2-4049-b82b-3a717d832fc7-kube-api-access-9hlcj\") on node \"crc\" DevicePath \"\""
Dec 13 06:45:03 crc kubenswrapper[5048]: I1213 06:45:03.306753 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29426805-cqfrx" event={"ID":"e6496a13-bbc2-4049-b82b-3a717d832fc7","Type":"ContainerDied","Data":"f8ded9d2b7ca30a1da67fe16b17799446b3096f9af1ef369505af264aeff8dea"}
Dec 13 06:45:03 crc kubenswrapper[5048]: I1213 06:45:03.307117 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f8ded9d2b7ca30a1da67fe16b17799446b3096f9af1ef369505af264aeff8dea"
Dec 13 06:45:03 crc kubenswrapper[5048]: I1213 06:45:03.306806 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29426805-cqfrx"
Dec 13 06:45:04 crc kubenswrapper[5048]: I1213 06:45:04.511745 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-qlml5"
Dec 13 06:45:04 crc kubenswrapper[5048]: I1213 06:45:04.511824 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-qlml5"
Dec 13 06:45:04 crc kubenswrapper[5048]: I1213 06:45:04.539709 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-qlml5"
Dec 13 06:45:05 crc kubenswrapper[5048]: I1213 06:45:05.343181 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-qlml5"
Dec 13 06:45:08 crc kubenswrapper[5048]: I1213 06:45:08.619736 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc"]
Dec 13 06:45:08 crc kubenswrapper[5048]: E1213 06:45:08.620252 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6496a13-bbc2-4049-b82b-3a717d832fc7" containerName="collect-profiles"
Dec 13 06:45:08 crc kubenswrapper[5048]: I1213 06:45:08.620271 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6496a13-bbc2-4049-b82b-3a717d832fc7" containerName="collect-profiles"
Dec 13 06:45:08 crc kubenswrapper[5048]: I1213 06:45:08.620389 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6496a13-bbc2-4049-b82b-3a717d832fc7" containerName="collect-profiles"
Dec 13 06:45:08 crc kubenswrapper[5048]: I1213 06:45:08.621205 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc"
Dec 13 06:45:08 crc kubenswrapper[5048]: I1213 06:45:08.622950 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-6gndd"
Dec 13 06:45:08 crc kubenswrapper[5048]: I1213 06:45:08.631615 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc"]
Dec 13 06:45:08 crc kubenswrapper[5048]: I1213 06:45:08.736350 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/35d3bce1-ed03-4919-b1eb-09b5a630a578-bundle\") pod \"57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc\" (UID: \"35d3bce1-ed03-4919-b1eb-09b5a630a578\") " pod="openstack-operators/57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc"
Dec 13 06:45:08 crc kubenswrapper[5048]: I1213 06:45:08.736426 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/35d3bce1-ed03-4919-b1eb-09b5a630a578-util\") pod \"57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc\" (UID: \"35d3bce1-ed03-4919-b1eb-09b5a630a578\") " pod="openstack-operators/57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc"
Dec 13 06:45:08 crc kubenswrapper[5048]: I1213 06:45:08.736532 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5c9d6\" (UniqueName: \"kubernetes.io/projected/35d3bce1-ed03-4919-b1eb-09b5a630a578-kube-api-access-5c9d6\") pod \"57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc\" (UID: \"35d3bce1-ed03-4919-b1eb-09b5a630a578\") " pod="openstack-operators/57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc"
Dec 13 06:45:08 crc kubenswrapper[5048]: I1213 06:45:08.838604 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5c9d6\" (UniqueName: \"kubernetes.io/projected/35d3bce1-ed03-4919-b1eb-09b5a630a578-kube-api-access-5c9d6\") pod \"57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc\" (UID: \"35d3bce1-ed03-4919-b1eb-09b5a630a578\") " pod="openstack-operators/57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc"
Dec 13 06:45:08 crc kubenswrapper[5048]: I1213 06:45:08.838750 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/35d3bce1-ed03-4919-b1eb-09b5a630a578-bundle\") pod \"57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc\" (UID: \"35d3bce1-ed03-4919-b1eb-09b5a630a578\") " pod="openstack-operators/57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc"
Dec 13 06:45:08 crc kubenswrapper[5048]: I1213 06:45:08.838879 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/35d3bce1-ed03-4919-b1eb-09b5a630a578-util\") pod \"57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc\" (UID: \"35d3bce1-ed03-4919-b1eb-09b5a630a578\") " pod="openstack-operators/57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc"
Dec 13 06:45:08 crc kubenswrapper[5048]: I1213 06:45:08.839355 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/35d3bce1-ed03-4919-b1eb-09b5a630a578-bundle\") pod \"57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc\" (UID: \"35d3bce1-ed03-4919-b1eb-09b5a630a578\") " pod="openstack-operators/57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc"
Dec 13 06:45:08 crc kubenswrapper[5048]: I1213 06:45:08.839880 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/35d3bce1-ed03-4919-b1eb-09b5a630a578-util\") pod \"57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc\" (UID: \"35d3bce1-ed03-4919-b1eb-09b5a630a578\") " pod="openstack-operators/57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc"
Dec 13 06:45:08 crc kubenswrapper[5048]: I1213 06:45:08.862937 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5c9d6\" (UniqueName: \"kubernetes.io/projected/35d3bce1-ed03-4919-b1eb-09b5a630a578-kube-api-access-5c9d6\") pod \"57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc\" (UID: \"35d3bce1-ed03-4919-b1eb-09b5a630a578\") " pod="openstack-operators/57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc"
Dec 13 06:45:08 crc kubenswrapper[5048]: I1213 06:45:08.936203 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc"
Dec 13 06:45:09 crc kubenswrapper[5048]: I1213 06:45:09.396220 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc"]
Dec 13 06:45:09 crc kubenswrapper[5048]: W1213 06:45:09.400245 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod35d3bce1_ed03_4919_b1eb_09b5a630a578.slice/crio-02c432227ad52193d9c4b97ead83249408027d6c85500616d64910a1163f246b WatchSource:0}: Error finding container 02c432227ad52193d9c4b97ead83249408027d6c85500616d64910a1163f246b: Status 404 returned error can't find the container with id 02c432227ad52193d9c4b97ead83249408027d6c85500616d64910a1163f246b
Dec 13 06:45:10 crc kubenswrapper[5048]: I1213 06:45:10.356166 5048 generic.go:334] "Generic (PLEG): container finished" podID="35d3bce1-ed03-4919-b1eb-09b5a630a578" containerID="bfc49a9ec467f0a12808af0b6a428abf2a80deae78c2018f564d1108c96e8471" exitCode=0
Dec 13 06:45:10 crc kubenswrapper[5048]: I1213 06:45:10.356228 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc" event={"ID":"35d3bce1-ed03-4919-b1eb-09b5a630a578","Type":"ContainerDied","Data":"bfc49a9ec467f0a12808af0b6a428abf2a80deae78c2018f564d1108c96e8471"}
Dec 13 06:45:10 crc kubenswrapper[5048]: I1213 06:45:10.356266 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc" event={"ID":"35d3bce1-ed03-4919-b1eb-09b5a630a578","Type":"ContainerStarted","Data":"02c432227ad52193d9c4b97ead83249408027d6c85500616d64910a1163f246b"}
Dec 13 06:45:11 crc kubenswrapper[5048]: I1213 06:45:11.364211 5048 generic.go:334] "Generic (PLEG): container finished" podID="35d3bce1-ed03-4919-b1eb-09b5a630a578" containerID="0dbe4d3960c06ad130b04924062b06de552e3cce04d02b7f5923dad9b15ddd03" exitCode=0
Dec 13 06:45:11 crc kubenswrapper[5048]: I1213 06:45:11.364320 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc" event={"ID":"35d3bce1-ed03-4919-b1eb-09b5a630a578","Type":"ContainerDied","Data":"0dbe4d3960c06ad130b04924062b06de552e3cce04d02b7f5923dad9b15ddd03"}
Dec 13 06:45:12 crc kubenswrapper[5048]: I1213 06:45:12.374672 5048 generic.go:334] "Generic (PLEG): container finished" podID="35d3bce1-ed03-4919-b1eb-09b5a630a578" containerID="165fb1d1f5f3b74ee64c5106b74181cef5d568774e16b7066115bdad62b3c600" exitCode=0
Dec 13 06:45:12 crc kubenswrapper[5048]: I1213 06:45:12.374783 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc" event={"ID":"35d3bce1-ed03-4919-b1eb-09b5a630a578","Type":"ContainerDied","Data":"165fb1d1f5f3b74ee64c5106b74181cef5d568774e16b7066115bdad62b3c600"}
Dec 13 06:45:13 crc kubenswrapper[5048]: I1213 06:45:13.762756 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc"
Dec 13 06:45:13 crc kubenswrapper[5048]: I1213 06:45:13.908600 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/35d3bce1-ed03-4919-b1eb-09b5a630a578-util\") pod \"35d3bce1-ed03-4919-b1eb-09b5a630a578\" (UID: \"35d3bce1-ed03-4919-b1eb-09b5a630a578\") "
Dec 13 06:45:13 crc kubenswrapper[5048]: I1213 06:45:13.908685 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5c9d6\" (UniqueName: \"kubernetes.io/projected/35d3bce1-ed03-4919-b1eb-09b5a630a578-kube-api-access-5c9d6\") pod \"35d3bce1-ed03-4919-b1eb-09b5a630a578\" (UID: \"35d3bce1-ed03-4919-b1eb-09b5a630a578\") "
Dec 13 06:45:13 crc kubenswrapper[5048]: I1213 06:45:13.908772 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/35d3bce1-ed03-4919-b1eb-09b5a630a578-bundle\") pod \"35d3bce1-ed03-4919-b1eb-09b5a630a578\" (UID: \"35d3bce1-ed03-4919-b1eb-09b5a630a578\") "
Dec 13 06:45:13 crc kubenswrapper[5048]: I1213 06:45:13.909681 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/35d3bce1-ed03-4919-b1eb-09b5a630a578-bundle" (OuterVolumeSpecName: "bundle") pod "35d3bce1-ed03-4919-b1eb-09b5a630a578" (UID: "35d3bce1-ed03-4919-b1eb-09b5a630a578"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 13 06:45:13 crc kubenswrapper[5048]: I1213 06:45:13.914616 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/35d3bce1-ed03-4919-b1eb-09b5a630a578-kube-api-access-5c9d6" (OuterVolumeSpecName: "kube-api-access-5c9d6") pod "35d3bce1-ed03-4919-b1eb-09b5a630a578" (UID: "35d3bce1-ed03-4919-b1eb-09b5a630a578"). InnerVolumeSpecName "kube-api-access-5c9d6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:45:13 crc kubenswrapper[5048]: I1213 06:45:13.925879 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/35d3bce1-ed03-4919-b1eb-09b5a630a578-util" (OuterVolumeSpecName: "util") pod "35d3bce1-ed03-4919-b1eb-09b5a630a578" (UID: "35d3bce1-ed03-4919-b1eb-09b5a630a578"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 13 06:45:14 crc kubenswrapper[5048]: I1213 06:45:14.010807 5048 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/35d3bce1-ed03-4919-b1eb-09b5a630a578-util\") on node \"crc\" DevicePath \"\""
Dec 13 06:45:14 crc kubenswrapper[5048]: I1213 06:45:14.010858 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5c9d6\" (UniqueName: \"kubernetes.io/projected/35d3bce1-ed03-4919-b1eb-09b5a630a578-kube-api-access-5c9d6\") on node \"crc\" DevicePath \"\""
Dec 13 06:45:14 crc kubenswrapper[5048]: I1213 06:45:14.010870 5048 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/35d3bce1-ed03-4919-b1eb-09b5a630a578-bundle\") on node \"crc\" DevicePath \"\""
Dec 13 06:45:14 crc kubenswrapper[5048]: I1213 06:45:14.395531 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc" event={"ID":"35d3bce1-ed03-4919-b1eb-09b5a630a578","Type":"ContainerDied","Data":"02c432227ad52193d9c4b97ead83249408027d6c85500616d64910a1163f246b"}
Dec 13 06:45:14 crc kubenswrapper[5048]: I1213 06:45:14.395600 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="02c432227ad52193d9c4b97ead83249408027d6c85500616d64910a1163f246b"
Dec 13 06:45:14 crc kubenswrapper[5048]: I1213 06:45:14.395668 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc"
Dec 13 06:45:16 crc kubenswrapper[5048]: I1213 06:45:16.216109 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 13 06:45:16 crc kubenswrapper[5048]: I1213 06:45:16.216521 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 13 06:45:18 crc kubenswrapper[5048]: I1213 06:45:18.348332 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-576c4d554c-lf2gp"]
Dec 13 06:45:18 crc kubenswrapper[5048]: E1213 06:45:18.348858 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35d3bce1-ed03-4919-b1eb-09b5a630a578" containerName="extract"
Dec 13 06:45:18 crc kubenswrapper[5048]: I1213 06:45:18.348869 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="35d3bce1-ed03-4919-b1eb-09b5a630a578" containerName="extract"
Dec 13 06:45:18 crc kubenswrapper[5048]: E1213 06:45:18.348884 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35d3bce1-ed03-4919-b1eb-09b5a630a578" containerName="util"
Dec 13 06:45:18 crc kubenswrapper[5048]: I1213 06:45:18.348890 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="35d3bce1-ed03-4919-b1eb-09b5a630a578" containerName="util"
Dec 13 06:45:18 crc kubenswrapper[5048]: E1213 06:45:18.348900 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35d3bce1-ed03-4919-b1eb-09b5a630a578" containerName="pull"
Dec 13 06:45:18 crc kubenswrapper[5048]: I1213 06:45:18.348907 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="35d3bce1-ed03-4919-b1eb-09b5a630a578" containerName="pull"
Dec 13 06:45:18 crc kubenswrapper[5048]: I1213 06:45:18.349017 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="35d3bce1-ed03-4919-b1eb-09b5a630a578" containerName="extract"
Dec 13 06:45:18 crc kubenswrapper[5048]: I1213 06:45:18.349398 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-576c4d554c-lf2gp"
Dec 13 06:45:18 crc kubenswrapper[5048]: I1213 06:45:18.353078 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-b7wj5"
Dec 13 06:45:18 crc kubenswrapper[5048]: I1213 06:45:18.372422 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-576c4d554c-lf2gp"]
Dec 13 06:45:18 crc kubenswrapper[5048]: I1213 06:45:18.466799 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pqvj5\" (UniqueName: \"kubernetes.io/projected/46a5447f-e917-4c59-8735-db6e8dce1527-kube-api-access-pqvj5\") pod \"openstack-operator-controller-operator-576c4d554c-lf2gp\" (UID: \"46a5447f-e917-4c59-8735-db6e8dce1527\") " pod="openstack-operators/openstack-operator-controller-operator-576c4d554c-lf2gp"
Dec 13 06:45:18 crc kubenswrapper[5048]: I1213 06:45:18.567940 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pqvj5\" (UniqueName: \"kubernetes.io/projected/46a5447f-e917-4c59-8735-db6e8dce1527-kube-api-access-pqvj5\") pod \"openstack-operator-controller-operator-576c4d554c-lf2gp\" (UID: \"46a5447f-e917-4c59-8735-db6e8dce1527\") " pod="openstack-operators/openstack-operator-controller-operator-576c4d554c-lf2gp"
Dec 13 06:45:18 crc kubenswrapper[5048]: I1213 06:45:18.588983 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pqvj5\" (UniqueName: \"kubernetes.io/projected/46a5447f-e917-4c59-8735-db6e8dce1527-kube-api-access-pqvj5\") pod \"openstack-operator-controller-operator-576c4d554c-lf2gp\" (UID: \"46a5447f-e917-4c59-8735-db6e8dce1527\") " pod="openstack-operators/openstack-operator-controller-operator-576c4d554c-lf2gp"
Dec 13 06:45:18 crc kubenswrapper[5048]: I1213 06:45:18.680393 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-576c4d554c-lf2gp"
Dec 13 06:45:19 crc kubenswrapper[5048]: I1213 06:45:19.096622 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-576c4d554c-lf2gp"]
Dec 13 06:45:19 crc kubenswrapper[5048]: I1213 06:45:19.423222 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-576c4d554c-lf2gp" event={"ID":"46a5447f-e917-4c59-8735-db6e8dce1527","Type":"ContainerStarted","Data":"4dc8b7385ddd333ba6b1fdda73768e298b2b8bc01010a28a0c2a16df8ce73b8d"}
Dec 13 06:45:24 crc kubenswrapper[5048]: I1213 06:45:24.462519 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-576c4d554c-lf2gp" event={"ID":"46a5447f-e917-4c59-8735-db6e8dce1527","Type":"ContainerStarted","Data":"69a364ebe819aa73b874a897f00fa13b64c1cd3864fc739043bf56575b16c616"}
Dec 13 06:45:24 crc kubenswrapper[5048]: I1213 06:45:24.462988 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-576c4d554c-lf2gp"
Dec 13 06:45:24 crc kubenswrapper[5048]: I1213 06:45:24.491700 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-576c4d554c-lf2gp" podStartSLOduration=1.6137629690000002 podStartE2EDuration="6.491682444s" podCreationTimestamp="2025-12-13 06:45:18 +0000 UTC" firstStartedPulling="2025-12-13 06:45:19.104136177 +0000 UTC m=+952.970730758" lastFinishedPulling="2025-12-13 06:45:23.982055652 +0000 UTC m=+957.848650233" observedRunningTime="2025-12-13 06:45:24.486659405 +0000 UTC m=+958.353254006" watchObservedRunningTime="2025-12-13 06:45:24.491682444 +0000 UTC m=+958.358277025"
Dec 13 06:45:38 crc kubenswrapper[5048]: I1213 06:45:38.685049 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-576c4d554c-lf2gp"
Dec 13 06:45:46 crc kubenswrapper[5048]: I1213 06:45:46.215406 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 13 06:45:46 crc kubenswrapper[5048]: I1213 06:45:46.216043 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 13 06:45:57 crc kubenswrapper[5048]: I1213 06:45:57.992192 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-95949466-xmb6d"]
Dec 13 06:45:57 crc kubenswrapper[5048]: I1213 06:45:57.993424 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-95949466-xmb6d"
Dec 13 06:45:57 crc kubenswrapper[5048]: I1213 06:45:57.995482 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-pgfp9"
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.006596 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-5cf45c46bd-mblxt"]
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.008346 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-5cf45c46bd-mblxt"
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.010369 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-7wmwz"
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.016717 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-95949466-xmb6d"]
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.020999 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-5cf45c46bd-mblxt"]
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.028472 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7r8lk\" (UniqueName: \"kubernetes.io/projected/50877cb8-07a0-48c4-af3d-72144fa836e0-kube-api-access-7r8lk\") pod \"barbican-operator-controller-manager-95949466-xmb6d\" (UID: \"50877cb8-07a0-48c4-af3d-72144fa836e0\") " pod="openstack-operators/barbican-operator-controller-manager-95949466-xmb6d"
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.028547 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rdvm6\" (UniqueName: \"kubernetes.io/projected/6d515ed0-b2e1-469e-a7c5-bbe62664979e-kube-api-access-rdvm6\") pod \"cinder-operator-controller-manager-5cf45c46bd-mblxt\" (UID: \"6d515ed0-b2e1-469e-a7c5-bbe62664979e\") " pod="openstack-operators/cinder-operator-controller-manager-5cf45c46bd-mblxt"
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.041569 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-66f8b87655-ll9d4"]
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.042293 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-66f8b87655-ll9d4"
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.045354 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-wf56c"
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.057334 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-767f9d7567-md4jk"]
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.058354 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-767f9d7567-md4jk"
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.062772 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-66f8b87655-ll9d4"]
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.066667 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-zwkbf"
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.073161 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-767f9d7567-md4jk"]
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.083105 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-59b8dcb766-sn85z"]
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.083815 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-59b8dcb766-sn85z"
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.086541 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-qxv9p"
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.101224 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-59b8dcb766-sn85z"]
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.138136 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kqpvp\" (UniqueName: \"kubernetes.io/projected/83f0165e-ed2b-436c-9ae0-e871bd291638-kube-api-access-kqpvp\") pod \"designate-operator-controller-manager-66f8b87655-ll9d4\" (UID: \"83f0165e-ed2b-436c-9ae0-e871bd291638\") " pod="openstack-operators/designate-operator-controller-manager-66f8b87655-ll9d4"
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.138220 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdvm6\" (UniqueName: \"kubernetes.io/projected/6d515ed0-b2e1-469e-a7c5-bbe62664979e-kube-api-access-rdvm6\") pod \"cinder-operator-controller-manager-5cf45c46bd-mblxt\" (UID: \"6d515ed0-b2e1-469e-a7c5-bbe62664979e\") " pod="openstack-operators/cinder-operator-controller-manager-5cf45c46bd-mblxt"
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.138307 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wbpls\" (UniqueName: \"kubernetes.io/projected/4d953dd1-d113-4dc6-a80b-3ded9d08b476-kube-api-access-wbpls\") pod \"glance-operator-controller-manager-767f9d7567-md4jk\" (UID: \"4d953dd1-d113-4dc6-a80b-3ded9d08b476\") " pod="openstack-operators/glance-operator-controller-manager-767f9d7567-md4jk"
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.138338 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v552j\" (UniqueName: \"kubernetes.io/projected/07f258ba-5ff1-4d34-8e07-62c024c15dba-kube-api-access-v552j\") pod \"heat-operator-controller-manager-59b8dcb766-sn85z\" (UID: \"07f258ba-5ff1-4d34-8e07-62c024c15dba\") " pod="openstack-operators/heat-operator-controller-manager-59b8dcb766-sn85z"
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.138376 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7r8lk\" (UniqueName: \"kubernetes.io/projected/50877cb8-07a0-48c4-af3d-72144fa836e0-kube-api-access-7r8lk\") pod \"barbican-operator-controller-manager-95949466-xmb6d\" (UID: \"50877cb8-07a0-48c4-af3d-72144fa836e0\") " pod="openstack-operators/barbican-operator-controller-manager-95949466-xmb6d"
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.153585 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-6ccf486b9-g6ggd"]
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.154618 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-6ccf486b9-g6ggd"
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.161233 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-7vb5s"
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.172796 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-6ccf486b9-g6ggd"]
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.177180 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7r8lk\" (UniqueName: \"kubernetes.io/projected/50877cb8-07a0-48c4-af3d-72144fa836e0-kube-api-access-7r8lk\") pod \"barbican-operator-controller-manager-95949466-xmb6d\" (UID: \"50877cb8-07a0-48c4-af3d-72144fa836e0\") " pod="openstack-operators/barbican-operator-controller-manager-95949466-xmb6d"
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.179407 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdvm6\" (UniqueName: \"kubernetes.io/projected/6d515ed0-b2e1-469e-a7c5-bbe62664979e-kube-api-access-rdvm6\") pod \"cinder-operator-controller-manager-5cf45c46bd-mblxt\" (UID: \"6d515ed0-b2e1-469e-a7c5-bbe62664979e\") " pod="openstack-operators/cinder-operator-controller-manager-5cf45c46bd-mblxt"
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.216890 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-f458558d7-xxj48"]
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.217846 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-f458558d7-xxj48"
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.223868 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-97c68"
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.239396 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lx7db\" (UniqueName: \"kubernetes.io/projected/9bd1a72e-ad49-46ae-a748-a21d05114b84-kube-api-access-lx7db\") pod \"horizon-operator-controller-manager-6ccf486b9-g6ggd\" (UID: \"9bd1a72e-ad49-46ae-a748-a21d05114b84\") " pod="openstack-operators/horizon-operator-controller-manager-6ccf486b9-g6ggd"
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.239501 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wbpls\" (UniqueName: \"kubernetes.io/projected/4d953dd1-d113-4dc6-a80b-3ded9d08b476-kube-api-access-wbpls\") pod \"glance-operator-controller-manager-767f9d7567-md4jk\" (UID: \"4d953dd1-d113-4dc6-a80b-3ded9d08b476\") " pod="openstack-operators/glance-operator-controller-manager-767f9d7567-md4jk"
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.239554 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v552j\" (UniqueName: \"kubernetes.io/projected/07f258ba-5ff1-4d34-8e07-62c024c15dba-kube-api-access-v552j\") pod \"heat-operator-controller-manager-59b8dcb766-sn85z\" (UID: \"07f258ba-5ff1-4d34-8e07-62c024c15dba\") " pod="openstack-operators/heat-operator-controller-manager-59b8dcb766-sn85z"
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.239598 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2qhrt\" (UniqueName: \"kubernetes.io/projected/b47e0c4b-dd06-4d55-b3d7-8e3d968df8e6-kube-api-access-2qhrt\") pod \"ironic-operator-controller-manager-f458558d7-xxj48\" (UID: \"b47e0c4b-dd06-4d55-b3d7-8e3d968df8e6\") " pod="openstack-operators/ironic-operator-controller-manager-f458558d7-xxj48"
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.239639 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kqpvp\" (UniqueName: \"kubernetes.io/projected/83f0165e-ed2b-436c-9ae0-e871bd291638-kube-api-access-kqpvp\") pod \"designate-operator-controller-manager-66f8b87655-ll9d4\" (UID: \"83f0165e-ed2b-436c-9ae0-e871bd291638\") " pod="openstack-operators/designate-operator-controller-manager-66f8b87655-ll9d4"
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.246066 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-5c7cbf548f-fdf2l"]
Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.247057 5048 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-5c7cbf548f-fdf2l" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.251101 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-4nwrd" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.264098 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kqpvp\" (UniqueName: \"kubernetes.io/projected/83f0165e-ed2b-436c-9ae0-e871bd291638-kube-api-access-kqpvp\") pod \"designate-operator-controller-manager-66f8b87655-ll9d4\" (UID: \"83f0165e-ed2b-436c-9ae0-e871bd291638\") " pod="openstack-operators/designate-operator-controller-manager-66f8b87655-ll9d4" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.267237 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v552j\" (UniqueName: \"kubernetes.io/projected/07f258ba-5ff1-4d34-8e07-62c024c15dba-kube-api-access-v552j\") pod \"heat-operator-controller-manager-59b8dcb766-sn85z\" (UID: \"07f258ba-5ff1-4d34-8e07-62c024c15dba\") " pod="openstack-operators/heat-operator-controller-manager-59b8dcb766-sn85z" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.269298 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-7cf9bd88b6-wswvl"] Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.270240 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-7cf9bd88b6-wswvl" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.276376 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wbpls\" (UniqueName: \"kubernetes.io/projected/4d953dd1-d113-4dc6-a80b-3ded9d08b476-kube-api-access-wbpls\") pod \"glance-operator-controller-manager-767f9d7567-md4jk\" (UID: \"4d953dd1-d113-4dc6-a80b-3ded9d08b476\") " pod="openstack-operators/glance-operator-controller-manager-767f9d7567-md4jk" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.279521 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.279837 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-pnzfn" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.309776 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-f458558d7-xxj48"] Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.314507 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-7cf9bd88b6-wswvl"] Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.330821 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-95949466-xmb6d" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.337504 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-5fdd9786f7-mkjnl"] Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.341509 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5fdd9786f7-mkjnl" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.342689 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2qhrt\" (UniqueName: \"kubernetes.io/projected/b47e0c4b-dd06-4d55-b3d7-8e3d968df8e6-kube-api-access-2qhrt\") pod \"ironic-operator-controller-manager-f458558d7-xxj48\" (UID: \"b47e0c4b-dd06-4d55-b3d7-8e3d968df8e6\") " pod="openstack-operators/ironic-operator-controller-manager-f458558d7-xxj48" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.342751 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d1a74609-2a57-44a5-8e88-dab8ae7fba98-cert\") pod \"infra-operator-controller-manager-7cf9bd88b6-wswvl\" (UID: \"d1a74609-2a57-44a5-8e88-dab8ae7fba98\") " pod="openstack-operators/infra-operator-controller-manager-7cf9bd88b6-wswvl" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.342785 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fdnjj\" (UniqueName: \"kubernetes.io/projected/148d1633-90ad-45da-af0b-5b182ee41795-kube-api-access-fdnjj\") pod \"keystone-operator-controller-manager-5c7cbf548f-fdf2l\" (UID: \"148d1633-90ad-45da-af0b-5b182ee41795\") " pod="openstack-operators/keystone-operator-controller-manager-5c7cbf548f-fdf2l" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.343526 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-85dnz" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.343608 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mjb2s\" (UniqueName: \"kubernetes.io/projected/d1a74609-2a57-44a5-8e88-dab8ae7fba98-kube-api-access-mjb2s\") pod \"infra-operator-controller-manager-7cf9bd88b6-wswvl\" (UID: \"d1a74609-2a57-44a5-8e88-dab8ae7fba98\") " pod="openstack-operators/infra-operator-controller-manager-7cf9bd88b6-wswvl" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.343656 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lx7db\" (UniqueName: \"kubernetes.io/projected/9bd1a72e-ad49-46ae-a748-a21d05114b84-kube-api-access-lx7db\") pod \"horizon-operator-controller-manager-6ccf486b9-g6ggd\" (UID: \"9bd1a72e-ad49-46ae-a748-a21d05114b84\") " pod="openstack-operators/horizon-operator-controller-manager-6ccf486b9-g6ggd" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.344361 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-5cf45c46bd-mblxt" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.380255 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-66f8b87655-ll9d4" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.385889 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lx7db\" (UniqueName: \"kubernetes.io/projected/9bd1a72e-ad49-46ae-a748-a21d05114b84-kube-api-access-lx7db\") pod \"horizon-operator-controller-manager-6ccf486b9-g6ggd\" (UID: \"9bd1a72e-ad49-46ae-a748-a21d05114b84\") " pod="openstack-operators/horizon-operator-controller-manager-6ccf486b9-g6ggd" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.397082 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-767f9d7567-md4jk" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.431244 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-59b8dcb766-sn85z" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.434593 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-5c7cbf548f-fdf2l"] Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.444659 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fdnjj\" (UniqueName: \"kubernetes.io/projected/148d1633-90ad-45da-af0b-5b182ee41795-kube-api-access-fdnjj\") pod \"keystone-operator-controller-manager-5c7cbf548f-fdf2l\" (UID: \"148d1633-90ad-45da-af0b-5b182ee41795\") " pod="openstack-operators/keystone-operator-controller-manager-5c7cbf548f-fdf2l" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.444923 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mjb2s\" (UniqueName: \"kubernetes.io/projected/d1a74609-2a57-44a5-8e88-dab8ae7fba98-kube-api-access-mjb2s\") pod \"infra-operator-controller-manager-7cf9bd88b6-wswvl\" (UID: \"d1a74609-2a57-44a5-8e88-dab8ae7fba98\") " pod="openstack-operators/infra-operator-controller-manager-7cf9bd88b6-wswvl" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.445011 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5blm4\" (UniqueName: \"kubernetes.io/projected/c58196eb-9b95-450a-98ff-d852ff7125c5-kube-api-access-5blm4\") pod \"manila-operator-controller-manager-5fdd9786f7-mkjnl\" (UID: \"c58196eb-9b95-450a-98ff-d852ff7125c5\") " pod="openstack-operators/manila-operator-controller-manager-5fdd9786f7-mkjnl" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.445163 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d1a74609-2a57-44a5-8e88-dab8ae7fba98-cert\") pod \"infra-operator-controller-manager-7cf9bd88b6-wswvl\" (UID: \"d1a74609-2a57-44a5-8e88-dab8ae7fba98\") " pod="openstack-operators/infra-operator-controller-manager-7cf9bd88b6-wswvl" Dec 13 06:45:58 crc kubenswrapper[5048]: E1213 06:45:58.445325 5048 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Dec 13 06:45:58 crc kubenswrapper[5048]: E1213 06:45:58.445450 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d1a74609-2a57-44a5-8e88-dab8ae7fba98-cert podName:d1a74609-2a57-44a5-8e88-dab8ae7fba98 nodeName:}" failed. 
No retries permitted until 2025-12-13 06:45:58.945417447 +0000 UTC m=+992.812012028 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d1a74609-2a57-44a5-8e88-dab8ae7fba98-cert") pod "infra-operator-controller-manager-7cf9bd88b6-wswvl" (UID: "d1a74609-2a57-44a5-8e88-dab8ae7fba98") : secret "infra-operator-webhook-server-cert" not found Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.455190 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2qhrt\" (UniqueName: \"kubernetes.io/projected/b47e0c4b-dd06-4d55-b3d7-8e3d968df8e6-kube-api-access-2qhrt\") pod \"ironic-operator-controller-manager-f458558d7-xxj48\" (UID: \"b47e0c4b-dd06-4d55-b3d7-8e3d968df8e6\") " pod="openstack-operators/ironic-operator-controller-manager-f458558d7-xxj48" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.455270 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5fdd9786f7-mkjnl"] Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.462036 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mjb2s\" (UniqueName: \"kubernetes.io/projected/d1a74609-2a57-44a5-8e88-dab8ae7fba98-kube-api-access-mjb2s\") pod \"infra-operator-controller-manager-7cf9bd88b6-wswvl\" (UID: \"d1a74609-2a57-44a5-8e88-dab8ae7fba98\") " pod="openstack-operators/infra-operator-controller-manager-7cf9bd88b6-wswvl" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.488855 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7cd87b778f-zgrtn"] Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.491707 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-7cd87b778f-zgrtn" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.489052 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fdnjj\" (UniqueName: \"kubernetes.io/projected/148d1633-90ad-45da-af0b-5b182ee41795-kube-api-access-fdnjj\") pod \"keystone-operator-controller-manager-5c7cbf548f-fdf2l\" (UID: \"148d1633-90ad-45da-af0b-5b182ee41795\") " pod="openstack-operators/keystone-operator-controller-manager-5c7cbf548f-fdf2l" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.494131 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-jqtjh" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.511660 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-f76f4954c-bpgwj"] Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.512466 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-f76f4954c-bpgwj" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.521307 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-nsqgt" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.524024 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-6ccf486b9-g6ggd" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.530014 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7cd87b778f-zgrtn"] Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.547527 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lzntk\" (UniqueName: \"kubernetes.io/projected/72736371-ff33-45e1-a685-9e2f89dcec60-kube-api-access-lzntk\") pod \"neutron-operator-controller-manager-7cd87b778f-zgrtn\" (UID: \"72736371-ff33-45e1-a685-9e2f89dcec60\") " pod="openstack-operators/neutron-operator-controller-manager-7cd87b778f-zgrtn" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.547871 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m8h8p\" (UniqueName: \"kubernetes.io/projected/1994b961-1801-4279-8c61-a901803b4a3a-kube-api-access-m8h8p\") pod \"mariadb-operator-controller-manager-f76f4954c-bpgwj\" (UID: \"1994b961-1801-4279-8c61-a901803b4a3a\") " pod="openstack-operators/mariadb-operator-controller-manager-f76f4954c-bpgwj" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.548057 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5blm4\" (UniqueName: \"kubernetes.io/projected/c58196eb-9b95-450a-98ff-d852ff7125c5-kube-api-access-5blm4\") pod \"manila-operator-controller-manager-5fdd9786f7-mkjnl\" (UID: \"c58196eb-9b95-450a-98ff-d852ff7125c5\") " pod="openstack-operators/manila-operator-controller-manager-5fdd9786f7-mkjnl" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.566732 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-f458558d7-xxj48" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.586749 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5blm4\" (UniqueName: \"kubernetes.io/projected/c58196eb-9b95-450a-98ff-d852ff7125c5-kube-api-access-5blm4\") pod \"manila-operator-controller-manager-5fdd9786f7-mkjnl\" (UID: \"c58196eb-9b95-450a-98ff-d852ff7125c5\") " pod="openstack-operators/manila-operator-controller-manager-5fdd9786f7-mkjnl" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.593519 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-fklqp"] Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.594369 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-fklqp" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.599175 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-f76f4954c-bpgwj"] Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.599824 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-7ktw6" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.655879 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-5c7cbf548f-fdf2l" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.659640 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-68c649d9d-gqsj6"] Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.660512 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-68c649d9d-gqsj6" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.664935 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jx9bv\" (UniqueName: \"kubernetes.io/projected/201cc161-1f13-498b-b99a-0d9c91bdc15a-kube-api-access-jx9bv\") pod \"nova-operator-controller-manager-5fbbf8b6cc-fklqp\" (UID: \"201cc161-1f13-498b-b99a-0d9c91bdc15a\") " pod="openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-fklqp" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.665167 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lzntk\" (UniqueName: \"kubernetes.io/projected/72736371-ff33-45e1-a685-9e2f89dcec60-kube-api-access-lzntk\") pod \"neutron-operator-controller-manager-7cd87b778f-zgrtn\" (UID: \"72736371-ff33-45e1-a685-9e2f89dcec60\") " pod="openstack-operators/neutron-operator-controller-manager-7cd87b778f-zgrtn" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.665337 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m8h8p\" (UniqueName: \"kubernetes.io/projected/1994b961-1801-4279-8c61-a901803b4a3a-kube-api-access-m8h8p\") pod \"mariadb-operator-controller-manager-f76f4954c-bpgwj\" (UID: \"1994b961-1801-4279-8c61-a901803b4a3a\") " pod="openstack-operators/mariadb-operator-controller-manager-f76f4954c-bpgwj" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.665767 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-9ms8w" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.679827 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-bf6d4f946-4ck8m"] Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.699049 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-4ck8m" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.709418 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m8h8p\" (UniqueName: \"kubernetes.io/projected/1994b961-1801-4279-8c61-a901803b4a3a-kube-api-access-m8h8p\") pod \"mariadb-operator-controller-manager-f76f4954c-bpgwj\" (UID: \"1994b961-1801-4279-8c61-a901803b4a3a\") " pod="openstack-operators/mariadb-operator-controller-manager-f76f4954c-bpgwj" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.710473 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-lf77n" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.723226 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878"] Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.724092 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.726502 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.726839 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-f5g7n" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.730218 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-fklqp"] Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.734111 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lzntk\" (UniqueName: \"kubernetes.io/projected/72736371-ff33-45e1-a685-9e2f89dcec60-kube-api-access-lzntk\") pod \"neutron-operator-controller-manager-7cd87b778f-zgrtn\" (UID: \"72736371-ff33-45e1-a685-9e2f89dcec60\") " pod="openstack-operators/neutron-operator-controller-manager-7cd87b778f-zgrtn" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.735824 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-68c649d9d-gqsj6"] Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.745894 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-8665b56d78-g782d"] Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.746971 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-8665b56d78-g782d" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.751327 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-tv7l9" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.751618 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-bf6d4f946-4ck8m"] Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.753245 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5fdd9786f7-mkjnl" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.762637 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-5c6df8f9-sx7sf"] Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.763597 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-5c6df8f9-sx7sf" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.765673 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-gp6kh" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.768373 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cljkc\" (UniqueName: \"kubernetes.io/projected/659a6b7b-7ef9-4fc2-8ea4-4298020aa94c-kube-api-access-cljkc\") pod \"octavia-operator-controller-manager-68c649d9d-gqsj6\" (UID: \"659a6b7b-7ef9-4fc2-8ea4-4298020aa94c\") " pod="openstack-operators/octavia-operator-controller-manager-68c649d9d-gqsj6" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.768454 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qpmct\" (UniqueName: \"kubernetes.io/projected/db14d0ba-f68b-48a5-b69a-97399548fca1-kube-api-access-qpmct\") pod \"openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878\" (UID: \"db14d0ba-f68b-48a5-b69a-97399548fca1\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.768497 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/db14d0ba-f68b-48a5-b69a-97399548fca1-cert\") pod \"openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878\" (UID: \"db14d0ba-f68b-48a5-b69a-97399548fca1\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.768570 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cf9dl\" (UniqueName: \"kubernetes.io/projected/859bbc21-6f39-4712-a04f-4473b78b32eb-kube-api-access-cf9dl\") pod \"ovn-operator-controller-manager-bf6d4f946-4ck8m\" (UID: \"859bbc21-6f39-4712-a04f-4473b78b32eb\") " pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-4ck8m" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.768619 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jx9bv\" (UniqueName: \"kubernetes.io/projected/201cc161-1f13-498b-b99a-0d9c91bdc15a-kube-api-access-jx9bv\") pod \"nova-operator-controller-manager-5fbbf8b6cc-fklqp\" (UID: \"201cc161-1f13-498b-b99a-0d9c91bdc15a\") " pod="openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-fklqp" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.775465 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878"] Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.787766 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-8665b56d78-g782d"] Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.795454 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-97d456b9-2bg97"] Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.800939 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-97d456b9-2bg97" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.803257 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-5c6df8f9-sx7sf"] Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.804370 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jx9bv\" (UniqueName: \"kubernetes.io/projected/201cc161-1f13-498b-b99a-0d9c91bdc15a-kube-api-access-jx9bv\") pod \"nova-operator-controller-manager-5fbbf8b6cc-fklqp\" (UID: \"201cc161-1f13-498b-b99a-0d9c91bdc15a\") " pod="openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-fklqp" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.804596 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-cnqsp" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.823967 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-7cd87b778f-zgrtn" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.848070 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-97d456b9-2bg97"] Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.853923 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-f76f4954c-bpgwj" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.869864 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/db14d0ba-f68b-48a5-b69a-97399548fca1-cert\") pod \"openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878\" (UID: \"db14d0ba-f68b-48a5-b69a-97399548fca1\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.869944 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cf9dl\" (UniqueName: \"kubernetes.io/projected/859bbc21-6f39-4712-a04f-4473b78b32eb-kube-api-access-cf9dl\") pod \"ovn-operator-controller-manager-bf6d4f946-4ck8m\" (UID: \"859bbc21-6f39-4712-a04f-4473b78b32eb\") " pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-4ck8m" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.870096 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rsd44\" (UniqueName: \"kubernetes.io/projected/813f57d5-1063-4dbc-9847-b6ea97e46fbe-kube-api-access-rsd44\") pod \"swift-operator-controller-manager-5c6df8f9-sx7sf\" (UID: \"813f57d5-1063-4dbc-9847-b6ea97e46fbe\") " pod="openstack-operators/swift-operator-controller-manager-5c6df8f9-sx7sf" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.870116 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7fsm4\" (UniqueName: \"kubernetes.io/projected/89f4ffab-8c61-4389-8f41-43cd8e2d54de-kube-api-access-7fsm4\") pod \"telemetry-operator-controller-manager-97d456b9-2bg97\" (UID: \"89f4ffab-8c61-4389-8f41-43cd8e2d54de\") " pod="openstack-operators/telemetry-operator-controller-manager-97d456b9-2bg97" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.870146 5048 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-cljkc\" (UniqueName: \"kubernetes.io/projected/659a6b7b-7ef9-4fc2-8ea4-4298020aa94c-kube-api-access-cljkc\") pod \"octavia-operator-controller-manager-68c649d9d-gqsj6\" (UID: \"659a6b7b-7ef9-4fc2-8ea4-4298020aa94c\") " pod="openstack-operators/octavia-operator-controller-manager-68c649d9d-gqsj6" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.870173 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jlj5f\" (UniqueName: \"kubernetes.io/projected/6d4d57f0-ff75-455d-8e8a-fe4947b3ee40-kube-api-access-jlj5f\") pod \"placement-operator-controller-manager-8665b56d78-g782d\" (UID: \"6d4d57f0-ff75-455d-8e8a-fe4947b3ee40\") " pod="openstack-operators/placement-operator-controller-manager-8665b56d78-g782d" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.870193 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qpmct\" (UniqueName: \"kubernetes.io/projected/db14d0ba-f68b-48a5-b69a-97399548fca1-kube-api-access-qpmct\") pod \"openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878\" (UID: \"db14d0ba-f68b-48a5-b69a-97399548fca1\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878" Dec 13 06:45:58 crc kubenswrapper[5048]: E1213 06:45:58.870599 5048 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 13 06:45:58 crc kubenswrapper[5048]: E1213 06:45:58.870661 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/db14d0ba-f68b-48a5-b69a-97399548fca1-cert podName:db14d0ba-f68b-48a5-b69a-97399548fca1 nodeName:}" failed. No retries permitted until 2025-12-13 06:45:59.370642723 +0000 UTC m=+993.237237304 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/db14d0ba-f68b-48a5-b69a-97399548fca1-cert") pod "openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878" (UID: "db14d0ba-f68b-48a5-b69a-97399548fca1") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.871291 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-756ccf86c7-dv9ww"] Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.872179 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-756ccf86c7-dv9ww" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.874483 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-knhp6" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.895196 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cljkc\" (UniqueName: \"kubernetes.io/projected/659a6b7b-7ef9-4fc2-8ea4-4298020aa94c-kube-api-access-cljkc\") pod \"octavia-operator-controller-manager-68c649d9d-gqsj6\" (UID: \"659a6b7b-7ef9-4fc2-8ea4-4298020aa94c\") " pod="openstack-operators/octavia-operator-controller-manager-68c649d9d-gqsj6" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.916643 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qpmct\" (UniqueName: \"kubernetes.io/projected/db14d0ba-f68b-48a5-b69a-97399548fca1-kube-api-access-qpmct\") pod \"openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878\" (UID: \"db14d0ba-f68b-48a5-b69a-97399548fca1\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.916540 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cf9dl\" (UniqueName: \"kubernetes.io/projected/859bbc21-6f39-4712-a04f-4473b78b32eb-kube-api-access-cf9dl\") pod \"ovn-operator-controller-manager-bf6d4f946-4ck8m\" (UID: \"859bbc21-6f39-4712-a04f-4473b78b32eb\") " pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-4ck8m" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.918275 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-756ccf86c7-dv9ww"] Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.961306 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-fklqp" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.973025 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d1a74609-2a57-44a5-8e88-dab8ae7fba98-cert\") pod \"infra-operator-controller-manager-7cf9bd88b6-wswvl\" (UID: \"d1a74609-2a57-44a5-8e88-dab8ae7fba98\") " pod="openstack-operators/infra-operator-controller-manager-7cf9bd88b6-wswvl" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.973080 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7rbdb\" (UniqueName: \"kubernetes.io/projected/a046de13-f7f7-4d7c-abf3-79ed8cc60fad-kube-api-access-7rbdb\") pod \"test-operator-controller-manager-756ccf86c7-dv9ww\" (UID: \"a046de13-f7f7-4d7c-abf3-79ed8cc60fad\") " pod="openstack-operators/test-operator-controller-manager-756ccf86c7-dv9ww" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.973126 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rsd44\" (UniqueName: \"kubernetes.io/projected/813f57d5-1063-4dbc-9847-b6ea97e46fbe-kube-api-access-rsd44\") pod \"swift-operator-controller-manager-5c6df8f9-sx7sf\" (UID: \"813f57d5-1063-4dbc-9847-b6ea97e46fbe\") " pod="openstack-operators/swift-operator-controller-manager-5c6df8f9-sx7sf" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.973143 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7fsm4\" (UniqueName: \"kubernetes.io/projected/89f4ffab-8c61-4389-8f41-43cd8e2d54de-kube-api-access-7fsm4\") pod \"telemetry-operator-controller-manager-97d456b9-2bg97\" (UID: \"89f4ffab-8c61-4389-8f41-43cd8e2d54de\") " pod="openstack-operators/telemetry-operator-controller-manager-97d456b9-2bg97" Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.973169 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jlj5f\" (UniqueName: \"kubernetes.io/projected/6d4d57f0-ff75-455d-8e8a-fe4947b3ee40-kube-api-access-jlj5f\") pod \"placement-operator-controller-manager-8665b56d78-g782d\" (UID: \"6d4d57f0-ff75-455d-8e8a-fe4947b3ee40\") " pod="openstack-operators/placement-operator-controller-manager-8665b56d78-g782d" Dec 13 06:45:58 crc kubenswrapper[5048]: E1213 06:45:58.973562 5048 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Dec 13 06:45:58 crc kubenswrapper[5048]: E1213 06:45:58.973600 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d1a74609-2a57-44a5-8e88-dab8ae7fba98-cert podName:d1a74609-2a57-44a5-8e88-dab8ae7fba98 nodeName:}" failed. No retries permitted until 2025-12-13 06:45:59.973587322 +0000 UTC m=+993.840181893 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d1a74609-2a57-44a5-8e88-dab8ae7fba98-cert") pod "infra-operator-controller-manager-7cf9bd88b6-wswvl" (UID: "d1a74609-2a57-44a5-8e88-dab8ae7fba98") : secret "infra-operator-webhook-server-cert" not found Dec 13 06:45:58 crc kubenswrapper[5048]: I1213 06:45:58.978913 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-55f78b7c4c-b4f6l"] Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.006822 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-55f78b7c4c-b4f6l" Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.022802 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-qqfn6" Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.026347 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-68c649d9d-gqsj6" Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.032973 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rsd44\" (UniqueName: \"kubernetes.io/projected/813f57d5-1063-4dbc-9847-b6ea97e46fbe-kube-api-access-rsd44\") pod \"swift-operator-controller-manager-5c6df8f9-sx7sf\" (UID: \"813f57d5-1063-4dbc-9847-b6ea97e46fbe\") " pod="openstack-operators/swift-operator-controller-manager-5c6df8f9-sx7sf" Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.034488 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-55f78b7c4c-b4f6l"] Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.034495 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jlj5f\" (UniqueName: \"kubernetes.io/projected/6d4d57f0-ff75-455d-8e8a-fe4947b3ee40-kube-api-access-jlj5f\") pod \"placement-operator-controller-manager-8665b56d78-g782d\" (UID: \"6d4d57f0-ff75-455d-8e8a-fe4947b3ee40\") " pod="openstack-operators/placement-operator-controller-manager-8665b56d78-g782d" Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.038626 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-4ck8m" Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.043527 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7fsm4\" (UniqueName: \"kubernetes.io/projected/89f4ffab-8c61-4389-8f41-43cd8e2d54de-kube-api-access-7fsm4\") pod \"telemetry-operator-controller-manager-97d456b9-2bg97\" (UID: \"89f4ffab-8c61-4389-8f41-43cd8e2d54de\") " pod="openstack-operators/telemetry-operator-controller-manager-97d456b9-2bg97" Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.076049 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7rbdb\" (UniqueName: \"kubernetes.io/projected/a046de13-f7f7-4d7c-abf3-79ed8cc60fad-kube-api-access-7rbdb\") pod \"test-operator-controller-manager-756ccf86c7-dv9ww\" (UID: \"a046de13-f7f7-4d7c-abf3-79ed8cc60fad\") " pod="openstack-operators/test-operator-controller-manager-756ccf86c7-dv9ww" Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.076154 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cxqrd\" (UniqueName: \"kubernetes.io/projected/7165a29e-88bf-4194-bffa-414a675d1be5-kube-api-access-cxqrd\") pod \"watcher-operator-controller-manager-55f78b7c4c-b4f6l\" (UID: \"7165a29e-88bf-4194-bffa-414a675d1be5\") " pod="openstack-operators/watcher-operator-controller-manager-55f78b7c4c-b4f6l" Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.099241 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7rbdb\" (UniqueName: \"kubernetes.io/projected/a046de13-f7f7-4d7c-abf3-79ed8cc60fad-kube-api-access-7rbdb\") pod \"test-operator-controller-manager-756ccf86c7-dv9ww\" (UID: 
\"a046de13-f7f7-4d7c-abf3-79ed8cc60fad\") " pod="openstack-operators/test-operator-controller-manager-756ccf86c7-dv9ww" Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.107041 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-58d7cfb75d-nsmnm"] Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.108092 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-58d7cfb75d-nsmnm" Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.125411 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-58d7cfb75d-nsmnm"] Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.125646 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-44wdw" Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.125934 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.128543 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.143877 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-8665b56d78-g782d" Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.157655 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fvxqc"] Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.158750 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fvxqc" Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.170316 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-xn2fw" Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.173491 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fvxqc"] Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.176965 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-5c6df8f9-sx7sf" Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.177613 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-metrics-certs\") pod \"openstack-operator-controller-manager-58d7cfb75d-nsmnm\" (UID: \"fea58380-4304-485b-aefc-48f9baea4126\") " pod="openstack-operators/openstack-operator-controller-manager-58d7cfb75d-nsmnm" Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.177692 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hxtb4\" (UniqueName: \"kubernetes.io/projected/fea58380-4304-485b-aefc-48f9baea4126-kube-api-access-hxtb4\") pod \"openstack-operator-controller-manager-58d7cfb75d-nsmnm\" (UID: \"fea58380-4304-485b-aefc-48f9baea4126\") " pod="openstack-operators/openstack-operator-controller-manager-58d7cfb75d-nsmnm" Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.177721 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cxqrd\" (UniqueName: \"kubernetes.io/projected/7165a29e-88bf-4194-bffa-414a675d1be5-kube-api-access-cxqrd\") pod \"watcher-operator-controller-manager-55f78b7c4c-b4f6l\" (UID: \"7165a29e-88bf-4194-bffa-414a675d1be5\") " pod="openstack-operators/watcher-operator-controller-manager-55f78b7c4c-b4f6l" Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.177746 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-webhook-certs\") pod \"openstack-operator-controller-manager-58d7cfb75d-nsmnm\" (UID: \"fea58380-4304-485b-aefc-48f9baea4126\") " pod="openstack-operators/openstack-operator-controller-manager-58d7cfb75d-nsmnm" Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.211291 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-97d456b9-2bg97" Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.227174 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cxqrd\" (UniqueName: \"kubernetes.io/projected/7165a29e-88bf-4194-bffa-414a675d1be5-kube-api-access-cxqrd\") pod \"watcher-operator-controller-manager-55f78b7c4c-b4f6l\" (UID: \"7165a29e-88bf-4194-bffa-414a675d1be5\") " pod="openstack-operators/watcher-operator-controller-manager-55f78b7c4c-b4f6l" Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.245165 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-756ccf86c7-dv9ww" Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.279003 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-webhook-certs\") pod \"openstack-operator-controller-manager-58d7cfb75d-nsmnm\" (UID: \"fea58380-4304-485b-aefc-48f9baea4126\") " pod="openstack-operators/openstack-operator-controller-manager-58d7cfb75d-nsmnm" Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.279095 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-metrics-certs\") pod \"openstack-operator-controller-manager-58d7cfb75d-nsmnm\" (UID: \"fea58380-4304-485b-aefc-48f9baea4126\") " pod="openstack-operators/openstack-operator-controller-manager-58d7cfb75d-nsmnm" Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.279140 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rk292\" (UniqueName: \"kubernetes.io/projected/4b1fbe4c-27c1-4456-bf01-6a42320cb63d-kube-api-access-rk292\") pod \"rabbitmq-cluster-operator-manager-668c99d594-fvxqc\" (UID: \"4b1fbe4c-27c1-4456-bf01-6a42320cb63d\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fvxqc" Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.279183 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hxtb4\" (UniqueName: \"kubernetes.io/projected/fea58380-4304-485b-aefc-48f9baea4126-kube-api-access-hxtb4\") pod \"openstack-operator-controller-manager-58d7cfb75d-nsmnm\" (UID: \"fea58380-4304-485b-aefc-48f9baea4126\") " pod="openstack-operators/openstack-operator-controller-manager-58d7cfb75d-nsmnm" Dec 13 06:45:59 crc kubenswrapper[5048]: E1213 06:45:59.279510 5048 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 13 06:45:59 crc kubenswrapper[5048]: E1213 06:45:59.279578 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-metrics-certs podName:fea58380-4304-485b-aefc-48f9baea4126 nodeName:}" failed. No retries permitted until 2025-12-13 06:45:59.779561327 +0000 UTC m=+993.646155908 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-metrics-certs") pod "openstack-operator-controller-manager-58d7cfb75d-nsmnm" (UID: "fea58380-4304-485b-aefc-48f9baea4126") : secret "metrics-server-cert" not found Dec 13 06:45:59 crc kubenswrapper[5048]: E1213 06:45:59.279597 5048 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 13 06:45:59 crc kubenswrapper[5048]: E1213 06:45:59.279640 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-webhook-certs podName:fea58380-4304-485b-aefc-48f9baea4126 nodeName:}" failed. No retries permitted until 2025-12-13 06:45:59.779624869 +0000 UTC m=+993.646219440 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-webhook-certs") pod "openstack-operator-controller-manager-58d7cfb75d-nsmnm" (UID: "fea58380-4304-485b-aefc-48f9baea4126") : secret "webhook-server-cert" not found Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.294228 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-5cf45c46bd-mblxt"] Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.329444 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hxtb4\" (UniqueName: \"kubernetes.io/projected/fea58380-4304-485b-aefc-48f9baea4126-kube-api-access-hxtb4\") pod \"openstack-operator-controller-manager-58d7cfb75d-nsmnm\" (UID: \"fea58380-4304-485b-aefc-48f9baea4126\") " pod="openstack-operators/openstack-operator-controller-manager-58d7cfb75d-nsmnm" Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.343344 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-59b8dcb766-sn85z"] Dec 13 06:45:59 crc kubenswrapper[5048]: W1213 06:45:59.343932 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6d515ed0_b2e1_469e_a7c5_bbe62664979e.slice/crio-34c8f760991698bb8d8514796b2713d2766d08f85231da8b5134c440cb8f4fed WatchSource:0}: Error finding container 34c8f760991698bb8d8514796b2713d2766d08f85231da8b5134c440cb8f4fed: Status 404 returned error can't find the container with id 34c8f760991698bb8d8514796b2713d2766d08f85231da8b5134c440cb8f4fed Dec 13 06:45:59 crc kubenswrapper[5048]: W1213 06:45:59.346943 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod07f258ba_5ff1_4d34_8e07_62c024c15dba.slice/crio-390828addae0e51c850679a7f08599d6e66eeb9c6087ec5942c844f37402dde3 WatchSource:0}: Error finding container 390828addae0e51c850679a7f08599d6e66eeb9c6087ec5942c844f37402dde3: Status 404 returned error can't find the container with id 390828addae0e51c850679a7f08599d6e66eeb9c6087ec5942c844f37402dde3 Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.380003 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/db14d0ba-f68b-48a5-b69a-97399548fca1-cert\") pod \"openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878\" (UID: \"db14d0ba-f68b-48a5-b69a-97399548fca1\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878" Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.380363 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rk292\" (UniqueName: \"kubernetes.io/projected/4b1fbe4c-27c1-4456-bf01-6a42320cb63d-kube-api-access-rk292\") pod \"rabbitmq-cluster-operator-manager-668c99d594-fvxqc\" (UID: \"4b1fbe4c-27c1-4456-bf01-6a42320cb63d\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fvxqc" Dec 13 06:45:59 crc kubenswrapper[5048]: E1213 06:45:59.380257 5048 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 13 06:45:59 crc kubenswrapper[5048]: E1213 06:45:59.380708 5048 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/secret/db14d0ba-f68b-48a5-b69a-97399548fca1-cert podName:db14d0ba-f68b-48a5-b69a-97399548fca1 nodeName:}" failed. No retries permitted until 2025-12-13 06:46:00.380695264 +0000 UTC m=+994.247289835 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/db14d0ba-f68b-48a5-b69a-97399548fca1-cert") pod "openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878" (UID: "db14d0ba-f68b-48a5-b69a-97399548fca1") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.392337 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-767f9d7567-md4jk"] Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.408018 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-55f78b7c4c-b4f6l" Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.409383 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rk292\" (UniqueName: \"kubernetes.io/projected/4b1fbe4c-27c1-4456-bf01-6a42320cb63d-kube-api-access-rk292\") pod \"rabbitmq-cluster-operator-manager-668c99d594-fvxqc\" (UID: \"4b1fbe4c-27c1-4456-bf01-6a42320cb63d\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fvxqc" Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.589460 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fvxqc" Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.593022 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-6ccf486b9-g6ggd"] Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.635500 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-66f8b87655-ll9d4"] Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.646010 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-f458558d7-xxj48"] Dec 13 06:45:59 crc kubenswrapper[5048]: W1213 06:45:59.665354 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb47e0c4b_dd06_4d55_b3d7_8e3d968df8e6.slice/crio-3ebbb77e85b1873ed002d7bc9ad42c124a834ff3b6d9ff482566ea78bc22b724 WatchSource:0}: Error finding container 3ebbb77e85b1873ed002d7bc9ad42c124a834ff3b6d9ff482566ea78bc22b724: Status 404 returned error can't find the container with id 3ebbb77e85b1873ed002d7bc9ad42c124a834ff3b6d9ff482566ea78bc22b724 Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.668855 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-95949466-xmb6d"] Dec 13 06:45:59 crc kubenswrapper[5048]: W1213 06:45:59.684562 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod83f0165e_ed2b_436c_9ae0_e871bd291638.slice/crio-3c3a503cac5c18d0d3e93cd8ede684d913612bf13e4cee94f77b7089fd7b5fc6 WatchSource:0}: Error finding container 3c3a503cac5c18d0d3e93cd8ede684d913612bf13e4cee94f77b7089fd7b5fc6: Status 404 returned error can't find the container with id 3c3a503cac5c18d0d3e93cd8ede684d913612bf13e4cee94f77b7089fd7b5fc6 Dec 13 06:45:59 crc kubenswrapper[5048]: 
W1213 06:45:59.694764 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod50877cb8_07a0_48c4_af3d_72144fa836e0.slice/crio-be981b39446cd35c597ed3c50d8e94374ed9760823276c45285674bc798c1eb4 WatchSource:0}: Error finding container be981b39446cd35c597ed3c50d8e94374ed9760823276c45285674bc798c1eb4: Status 404 returned error can't find the container with id be981b39446cd35c597ed3c50d8e94374ed9760823276c45285674bc798c1eb4 Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.758633 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-95949466-xmb6d" event={"ID":"50877cb8-07a0-48c4-af3d-72144fa836e0","Type":"ContainerStarted","Data":"be981b39446cd35c597ed3c50d8e94374ed9760823276c45285674bc798c1eb4"} Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.760477 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-5cf45c46bd-mblxt" event={"ID":"6d515ed0-b2e1-469e-a7c5-bbe62664979e","Type":"ContainerStarted","Data":"34c8f760991698bb8d8514796b2713d2766d08f85231da8b5134c440cb8f4fed"} Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.762253 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-66f8b87655-ll9d4" event={"ID":"83f0165e-ed2b-436c-9ae0-e871bd291638","Type":"ContainerStarted","Data":"3c3a503cac5c18d0d3e93cd8ede684d913612bf13e4cee94f77b7089fd7b5fc6"} Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.764947 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-6ccf486b9-g6ggd" event={"ID":"9bd1a72e-ad49-46ae-a748-a21d05114b84","Type":"ContainerStarted","Data":"8de4be7b896b50f917955c20e44766d4e69d700595a461b62d2fdc01148263f8"} Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.768483 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-767f9d7567-md4jk" event={"ID":"4d953dd1-d113-4dc6-a80b-3ded9d08b476","Type":"ContainerStarted","Data":"04b6e92f1d760ca38ca10bd32cbd2f7b87cf81dccdba98aa38c6040ead2d3f08"} Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.785892 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-59b8dcb766-sn85z" event={"ID":"07f258ba-5ff1-4d34-8e07-62c024c15dba","Type":"ContainerStarted","Data":"390828addae0e51c850679a7f08599d6e66eeb9c6087ec5942c844f37402dde3"} Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.786524 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-webhook-certs\") pod \"openstack-operator-controller-manager-58d7cfb75d-nsmnm\" (UID: \"fea58380-4304-485b-aefc-48f9baea4126\") " pod="openstack-operators/openstack-operator-controller-manager-58d7cfb75d-nsmnm" Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.786639 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-metrics-certs\") pod \"openstack-operator-controller-manager-58d7cfb75d-nsmnm\" (UID: \"fea58380-4304-485b-aefc-48f9baea4126\") " pod="openstack-operators/openstack-operator-controller-manager-58d7cfb75d-nsmnm" Dec 13 06:45:59 crc kubenswrapper[5048]: E1213 06:45:59.786801 5048 
secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 13 06:45:59 crc kubenswrapper[5048]: E1213 06:45:59.786863 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-metrics-certs podName:fea58380-4304-485b-aefc-48f9baea4126 nodeName:}" failed. No retries permitted until 2025-12-13 06:46:00.786844452 +0000 UTC m=+994.653439043 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-metrics-certs") pod "openstack-operator-controller-manager-58d7cfb75d-nsmnm" (UID: "fea58380-4304-485b-aefc-48f9baea4126") : secret "metrics-server-cert" not found Dec 13 06:45:59 crc kubenswrapper[5048]: E1213 06:45:59.786801 5048 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 13 06:45:59 crc kubenswrapper[5048]: E1213 06:45:59.786989 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-webhook-certs podName:fea58380-4304-485b-aefc-48f9baea4126 nodeName:}" failed. No retries permitted until 2025-12-13 06:46:00.786957965 +0000 UTC m=+994.653552586 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-webhook-certs") pod "openstack-operator-controller-manager-58d7cfb75d-nsmnm" (UID: "fea58380-4304-485b-aefc-48f9baea4126") : secret "webhook-server-cert" not found Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.791560 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-f458558d7-xxj48" event={"ID":"b47e0c4b-dd06-4d55-b3d7-8e3d968df8e6","Type":"ContainerStarted","Data":"3ebbb77e85b1873ed002d7bc9ad42c124a834ff3b6d9ff482566ea78bc22b724"} Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.836503 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-5c7cbf548f-fdf2l"] Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.853101 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5fdd9786f7-mkjnl"] Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.878593 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7cd87b778f-zgrtn"] Dec 13 06:45:59 crc kubenswrapper[5048]: W1213 06:45:59.879928 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc58196eb_9b95_450a_98ff_d852ff7125c5.slice/crio-c350799272c9a2537333267278063d39802b14ff023d44028ba31e324446a614 WatchSource:0}: Error finding container c350799272c9a2537333267278063d39802b14ff023d44028ba31e324446a614: Status 404 returned error can't find the container with id c350799272c9a2537333267278063d39802b14ff023d44028ba31e324446a614 Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.889291 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-f76f4954c-bpgwj"] Dec 13 06:45:59 crc kubenswrapper[5048]: W1213 06:45:59.891128 5048 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod72736371_ff33_45e1_a685_9e2f89dcec60.slice/crio-c0062586e691c33fe3fef8b7c21e8b204e6b9137dc309ab74116e317441918f8 WatchSource:0}: Error finding container c0062586e691c33fe3fef8b7c21e8b204e6b9137dc309ab74116e317441918f8: Status 404 returned error can't find the container with id c0062586e691c33fe3fef8b7c21e8b204e6b9137dc309ab74116e317441918f8 Dec 13 06:45:59 crc kubenswrapper[5048]: W1213 06:45:59.891966 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1994b961_1801_4279_8c61_a901803b4a3a.slice/crio-7c3bb3c4c4e75d94ca99a3e02c667483d53f02ec44aca501e936f417e08e0a50 WatchSource:0}: Error finding container 7c3bb3c4c4e75d94ca99a3e02c667483d53f02ec44aca501e936f417e08e0a50: Status 404 returned error can't find the container with id 7c3bb3c4c4e75d94ca99a3e02c667483d53f02ec44aca501e936f417e08e0a50 Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.972162 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-fklqp"] Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.977504 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-5c6df8f9-sx7sf"] Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.989487 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d1a74609-2a57-44a5-8e88-dab8ae7fba98-cert\") pod \"infra-operator-controller-manager-7cf9bd88b6-wswvl\" (UID: \"d1a74609-2a57-44a5-8e88-dab8ae7fba98\") " pod="openstack-operators/infra-operator-controller-manager-7cf9bd88b6-wswvl" Dec 13 06:45:59 crc kubenswrapper[5048]: E1213 06:45:59.989654 5048 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Dec 13 06:45:59 crc kubenswrapper[5048]: E1213 06:45:59.989719 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d1a74609-2a57-44a5-8e88-dab8ae7fba98-cert podName:d1a74609-2a57-44a5-8e88-dab8ae7fba98 nodeName:}" failed. No retries permitted until 2025-12-13 06:46:01.989701183 +0000 UTC m=+995.856295764 (durationBeforeRetry 2s). 
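The durationBeforeRetry values in the volume-mount failures above double on every attempt: 500ms, then 1s, then 2s here, continuing to 4s, 8s, and 16s further down in the log. This is a capped exponential backoff. A minimal Go sketch of that doubling policy follows; the names (nextBackoff, initialBackoff, maxBackoff) and the cap value are illustrative assumptions, not kubelet source.

// Illustrative sketch only: reproduces the doubling durationBeforeRetry
// progression visible in the surrounding log entries. Names and the cap
// are hypothetical, not taken from kubelet's nestedpendingoperations code.
package main

import (
	"fmt"
	"time"
)

const (
	initialBackoff = 500 * time.Millisecond // first retry delay seen in the log
	maxBackoff     = 2 * time.Minute        // assumed ceiling for illustration
)

// nextBackoff doubles the previous delay, saturating at maxBackoff.
func nextBackoff(prev time.Duration) time.Duration {
	if prev <= 0 {
		return initialBackoff
	}
	next := 2 * prev
	if next > maxBackoff {
		return maxBackoff
	}
	return next
}

func main() {
	d := time.Duration(0)
	for i := 0; i < 6; i++ {
		d = nextBackoff(d)
		fmt.Println(d) // prints 500ms, 1s, 2s, 4s, 8s, 16s
	}
}

Capping the delay keeps a permanently missing secret from pushing retries out indefinitely while still bounding the load of repeated mount attempts.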
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d1a74609-2a57-44a5-8e88-dab8ae7fba98-cert") pod "infra-operator-controller-manager-7cf9bd88b6-wswvl" (UID: "d1a74609-2a57-44a5-8e88-dab8ae7fba98") : secret "infra-operator-webhook-server-cert" not found Dec 13 06:45:59 crc kubenswrapper[5048]: I1213 06:45:59.991795 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-68c649d9d-gqsj6"] Dec 13 06:46:00 crc kubenswrapper[5048]: I1213 06:46:00.004582 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-bf6d4f946-4ck8m"] Dec 13 06:46:00 crc kubenswrapper[5048]: I1213 06:46:00.089614 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fvxqc"] Dec 13 06:46:00 crc kubenswrapper[5048]: W1213 06:46:00.145670 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod659a6b7b_7ef9_4fc2_8ea4_4298020aa94c.slice/crio-0030be03e3ce5588bac4cfab059e0a7f92d976bb5fc98ed70f00c7cd1ec46fda WatchSource:0}: Error finding container 0030be03e3ce5588bac4cfab059e0a7f92d976bb5fc98ed70f00c7cd1ec46fda: Status 404 returned error can't find the container with id 0030be03e3ce5588bac4cfab059e0a7f92d976bb5fc98ed70f00c7cd1ec46fda Dec 13 06:46:00 crc kubenswrapper[5048]: W1213 06:46:00.153255 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4b1fbe4c_27c1_4456_bf01_6a42320cb63d.slice/crio-244d60a265e4b51a90e23f0b09c564ebef7948433aa971c96c586260aab1fe7b WatchSource:0}: Error finding container 244d60a265e4b51a90e23f0b09c564ebef7948433aa971c96c586260aab1fe7b: Status 404 returned error can't find the container with id 244d60a265e4b51a90e23f0b09c564ebef7948433aa971c96c586260aab1fe7b Dec 13 06:46:00 crc kubenswrapper[5048]: E1213 06:46:00.170515 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:3aa109bb973253ae9dcf339b9b65abbd1176cdb4be672c93e538a5f113816991,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-rsd44,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-5c6df8f9-sx7sf_openstack-operators(813f57d5-1063-4dbc-9847-b6ea97e46fbe): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 13 06:46:00 crc kubenswrapper[5048]: W1213 06:46:00.171295 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod859bbc21_6f39_4712_a04f_4473b78b32eb.slice/crio-a8bceee942a8e891bf8b526cce4ae97f8de513ecb9982331dfb707b0051df8aa WatchSource:0}: Error finding container a8bceee942a8e891bf8b526cce4ae97f8de513ecb9982331dfb707b0051df8aa: Status 404 returned error can't find the container with id a8bceee942a8e891bf8b526cce4ae97f8de513ecb9982331dfb707b0051df8aa Dec 13 06:46:00 crc kubenswrapper[5048]: E1213 06:46:00.173013 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/swift-operator-controller-manager-5c6df8f9-sx7sf" podUID="813f57d5-1063-4dbc-9847-b6ea97e46fbe" Dec 13 06:46:00 crc kubenswrapper[5048]: I1213 06:46:00.180557 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-8665b56d78-g782d"] Dec 13 06:46:00 crc kubenswrapper[5048]: E1213 06:46:00.187877 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:635a4aef9d6f0b799e8ec91333dbb312160c001d05b3c63f614c124e0b67cb59,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-cf9dl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-bf6d4f946-4ck8m_openstack-operators(859bbc21-6f39-4712-a04f-4473b78b32eb): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 13 06:46:00 crc kubenswrapper[5048]: I1213 06:46:00.188150 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-97d456b9-2bg97"] Dec 13 06:46:00 crc kubenswrapper[5048]: E1213 06:46:00.189003 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-4ck8m" podUID="859bbc21-6f39-4712-a04f-4473b78b32eb" Dec 13 06:46:00 crc kubenswrapper[5048]: W1213 06:46:00.194687 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6d4d57f0_ff75_455d_8e8a_fe4947b3ee40.slice/crio-60bc3a260ba8dbbf8daca6dcc820b906c3cfaeb5a846b55bbe1d7864f40bd321 WatchSource:0}: Error finding container 60bc3a260ba8dbbf8daca6dcc820b906c3cfaeb5a846b55bbe1d7864f40bd321: Status 404 returned error can't find the container with id 60bc3a260ba8dbbf8daca6dcc820b906c3cfaeb5a846b55bbe1d7864f40bd321 Dec 13 06:46:00 crc kubenswrapper[5048]: E1213 06:46:00.197339 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:d29650b006da97eb9178fcc58f2eb9fead8c2b414fac18f86a3c3a1507488c4f,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jlj5f,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-8665b56d78-g782d_openstack-operators(6d4d57f0-ff75-455d-8e8a-fe4947b3ee40): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 13 06:46:00 crc kubenswrapper[5048]: E1213 06:46:00.198507 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/placement-operator-controller-manager-8665b56d78-g782d" podUID="6d4d57f0-ff75-455d-8e8a-fe4947b3ee40" Dec 13 06:46:00 crc kubenswrapper[5048]: W1213 06:46:00.199562 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod89f4ffab_8c61_4389_8f41_43cd8e2d54de.slice/crio-7377070cfcb52432771339951df4d01bf4f4dba23c16f8cb63418f7a61e90c18 WatchSource:0}: Error finding container 7377070cfcb52432771339951df4d01bf4f4dba23c16f8cb63418f7a61e90c18: Status 404 returned error can't find the container with id 7377070cfcb52432771339951df4d01bf4f4dba23c16f8cb63418f7a61e90c18 Dec 13 06:46:00 crc kubenswrapper[5048]: E1213 06:46:00.206293 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:f27e732ec1faee765461bf137d9be81278b2fa39675019a73622755e1e610b6f,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-7fsm4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-97d456b9-2bg97_openstack-operators(89f4ffab-8c61-4389-8f41-43cd8e2d54de): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 13 06:46:00 crc kubenswrapper[5048]: E1213 06:46:00.207699 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/telemetry-operator-controller-manager-97d456b9-2bg97" podUID="89f4ffab-8c61-4389-8f41-43cd8e2d54de" Dec 13 06:46:00 crc kubenswrapper[5048]: I1213 06:46:00.248104 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-55f78b7c4c-b4f6l"] Dec 13 06:46:00 crc kubenswrapper[5048]: E1213 06:46:00.250453 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:961417d59f527d925ac48ff6a11de747d0493315e496e34dc83d76a1a1fff58a,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-cxqrd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-55f78b7c4c-b4f6l_openstack-operators(7165a29e-88bf-4194-bffa-414a675d1be5): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 13 06:46:00 crc kubenswrapper[5048]: E1213 06:46:00.253799 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/watcher-operator-controller-manager-55f78b7c4c-b4f6l" podUID="7165a29e-88bf-4194-bffa-414a675d1be5" Dec 13 06:46:00 crc kubenswrapper[5048]: E1213 06:46:00.262999 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:101b3e007d8c9f2e183262d7712f986ad51256448099069bc14f1ea5f997ab94,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-7rbdb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-756ccf86c7-dv9ww_openstack-operators(a046de13-f7f7-4d7c-abf3-79ed8cc60fad): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 13 06:46:00 crc kubenswrapper[5048]: E1213 06:46:00.264289 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/test-operator-controller-manager-756ccf86c7-dv9ww" podUID="a046de13-f7f7-4d7c-abf3-79ed8cc60fad" Dec 13 06:46:00 crc kubenswrapper[5048]: I1213 06:46:00.264505 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-756ccf86c7-dv9ww"] Dec 13 06:46:00 crc kubenswrapper[5048]: I1213 06:46:00.395922 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/db14d0ba-f68b-48a5-b69a-97399548fca1-cert\") pod \"openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878\" (UID: \"db14d0ba-f68b-48a5-b69a-97399548fca1\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878" Dec 13 06:46:00 crc kubenswrapper[5048]: E1213 06:46:00.396071 5048 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 13 06:46:00 crc kubenswrapper[5048]: E1213 06:46:00.396161 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/db14d0ba-f68b-48a5-b69a-97399548fca1-cert podName:db14d0ba-f68b-48a5-b69a-97399548fca1 nodeName:}" failed. No retries permitted until 2025-12-13 06:46:02.396140158 +0000 UTC m=+996.262734739 (durationBeforeRetry 2s). 
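Each of the ErrImagePull failures above carries the same reason, "pull QPS exceeded": with many operator deployments starting at once, the kubelet's client-side image-pull rate limit (the registryPullQPS/registryBurst token bucket) runs dry before the container runtime ever contacts the registry, and the affected pods fall back to ImagePullBackOff. A rough sketch of that gating follows, using golang.org/x/time/rate in place of kubelet's internal limiter and assuming the default values of 5 QPS with a burst of 10.

// Sketch of client-side pull throttling, assuming registryPullQPS=5 and
// registryBurst=10. This uses golang.org/x/time/rate rather than kubelet's
// internal flowcontrol limiter; the pullImage helper is hypothetical.
package main

import (
	"errors"
	"fmt"

	"golang.org/x/time/rate"
)

var limiter = rate.NewLimiter(rate.Limit(5), 10) // 5 pulls/s, burst of 10

// pullImage refuses the pull when no token is available, mirroring the
// "pull QPS exceeded" errors in the log.
func pullImage(ref string) error {
	if !limiter.Allow() {
		return errors.New("pull QPS exceeded")
	}
	// ... a real pull would be issued to the runtime here ...
	return nil
}

func main() {
	for i := 0; i < 15; i++ {
		if err := pullImage("quay.io/example/operator"); err != nil {
			fmt.Println("pull", i, "failed:", err)
		}
	}
}

In a fast burst the first ten Allow() calls drain the bucket and later pulls are refused immediately, which matches the cluster of simultaneous swift/ovn/placement/telemetry/watcher/test-operator failures recorded above.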
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/db14d0ba-f68b-48a5-b69a-97399548fca1-cert") pod "openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878" (UID: "db14d0ba-f68b-48a5-b69a-97399548fca1") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 13 06:46:00 crc kubenswrapper[5048]: I1213 06:46:00.799801 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-55f78b7c4c-b4f6l" event={"ID":"7165a29e-88bf-4194-bffa-414a675d1be5","Type":"ContainerStarted","Data":"b22ce4adb5c2cadb2e551cd873874e0192df93cd9788cf9b8382c102e3bfda4c"} Dec 13 06:46:00 crc kubenswrapper[5048]: I1213 06:46:00.801024 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fvxqc" event={"ID":"4b1fbe4c-27c1-4456-bf01-6a42320cb63d","Type":"ContainerStarted","Data":"244d60a265e4b51a90e23f0b09c564ebef7948433aa971c96c586260aab1fe7b"} Dec 13 06:46:00 crc kubenswrapper[5048]: I1213 06:46:00.802552 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-metrics-certs\") pod \"openstack-operator-controller-manager-58d7cfb75d-nsmnm\" (UID: \"fea58380-4304-485b-aefc-48f9baea4126\") " pod="openstack-operators/openstack-operator-controller-manager-58d7cfb75d-nsmnm" Dec 13 06:46:00 crc kubenswrapper[5048]: I1213 06:46:00.802650 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-webhook-certs\") pod \"openstack-operator-controller-manager-58d7cfb75d-nsmnm\" (UID: \"fea58380-4304-485b-aefc-48f9baea4126\") " pod="openstack-operators/openstack-operator-controller-manager-58d7cfb75d-nsmnm" Dec 13 06:46:00 crc kubenswrapper[5048]: E1213 06:46:00.802774 5048 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 13 06:46:00 crc kubenswrapper[5048]: E1213 06:46:00.802868 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-webhook-certs podName:fea58380-4304-485b-aefc-48f9baea4126 nodeName:}" failed. No retries permitted until 2025-12-13 06:46:02.80284806 +0000 UTC m=+996.669442651 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-webhook-certs") pod "openstack-operator-controller-manager-58d7cfb75d-nsmnm" (UID: "fea58380-4304-485b-aefc-48f9baea4126") : secret "webhook-server-cert" not found Dec 13 06:46:00 crc kubenswrapper[5048]: E1213 06:46:00.803357 5048 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 13 06:46:00 crc kubenswrapper[5048]: E1213 06:46:00.803402 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-metrics-certs podName:fea58380-4304-485b-aefc-48f9baea4126 nodeName:}" failed. No retries permitted until 2025-12-13 06:46:02.803389466 +0000 UTC m=+996.669984047 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-metrics-certs") pod "openstack-operator-controller-manager-58d7cfb75d-nsmnm" (UID: "fea58380-4304-485b-aefc-48f9baea4126") : secret "metrics-server-cert" not found Dec 13 06:46:00 crc kubenswrapper[5048]: I1213 06:46:00.804077 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-68c649d9d-gqsj6" event={"ID":"659a6b7b-7ef9-4fc2-8ea4-4298020aa94c","Type":"ContainerStarted","Data":"0030be03e3ce5588bac4cfab059e0a7f92d976bb5fc98ed70f00c7cd1ec46fda"} Dec 13 06:46:00 crc kubenswrapper[5048]: E1213 06:46:00.805177 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:961417d59f527d925ac48ff6a11de747d0493315e496e34dc83d76a1a1fff58a\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-55f78b7c4c-b4f6l" podUID="7165a29e-88bf-4194-bffa-414a675d1be5" Dec 13 06:46:00 crc kubenswrapper[5048]: I1213 06:46:00.805651 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-5c6df8f9-sx7sf" event={"ID":"813f57d5-1063-4dbc-9847-b6ea97e46fbe","Type":"ContainerStarted","Data":"57ee771c3fe0707eefedf5e938ae77ad64714f29509483936486843ba7167b92"} Dec 13 06:46:00 crc kubenswrapper[5048]: I1213 06:46:00.807815 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5fdd9786f7-mkjnl" event={"ID":"c58196eb-9b95-450a-98ff-d852ff7125c5","Type":"ContainerStarted","Data":"c350799272c9a2537333267278063d39802b14ff023d44028ba31e324446a614"} Dec 13 06:46:00 crc kubenswrapper[5048]: E1213 06:46:00.808232 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:3aa109bb973253ae9dcf339b9b65abbd1176cdb4be672c93e538a5f113816991\\\"\"" pod="openstack-operators/swift-operator-controller-manager-5c6df8f9-sx7sf" podUID="813f57d5-1063-4dbc-9847-b6ea97e46fbe" Dec 13 06:46:00 crc kubenswrapper[5048]: I1213 06:46:00.809565 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-756ccf86c7-dv9ww" event={"ID":"a046de13-f7f7-4d7c-abf3-79ed8cc60fad","Type":"ContainerStarted","Data":"594f702d2f3c7757e2a814e2ce8297d0f3ec070c582244448df243af43a53f8b"} Dec 13 06:46:00 crc kubenswrapper[5048]: E1213 06:46:00.811829 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:101b3e007d8c9f2e183262d7712f986ad51256448099069bc14f1ea5f997ab94\\\"\"" pod="openstack-operators/test-operator-controller-manager-756ccf86c7-dv9ww" podUID="a046de13-f7f7-4d7c-abf3-79ed8cc60fad" Dec 13 06:46:00 crc kubenswrapper[5048]: I1213 06:46:00.812077 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-f76f4954c-bpgwj" event={"ID":"1994b961-1801-4279-8c61-a901803b4a3a","Type":"ContainerStarted","Data":"7c3bb3c4c4e75d94ca99a3e02c667483d53f02ec44aca501e936f417e08e0a50"} Dec 13 06:46:00 crc kubenswrapper[5048]: I1213 06:46:00.813954 5048 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-4ck8m" event={"ID":"859bbc21-6f39-4712-a04f-4473b78b32eb","Type":"ContainerStarted","Data":"a8bceee942a8e891bf8b526cce4ae97f8de513ecb9982331dfb707b0051df8aa"} Dec 13 06:46:00 crc kubenswrapper[5048]: I1213 06:46:00.818258 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-5c7cbf548f-fdf2l" event={"ID":"148d1633-90ad-45da-af0b-5b182ee41795","Type":"ContainerStarted","Data":"c7e8ad4a18e2d6751d87f965367f0b124db3bae4d2b4fc3b30b661eb92db48f4"} Dec 13 06:46:00 crc kubenswrapper[5048]: E1213 06:46:00.819053 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:635a4aef9d6f0b799e8ec91333dbb312160c001d05b3c63f614c124e0b67cb59\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-4ck8m" podUID="859bbc21-6f39-4712-a04f-4473b78b32eb" Dec 13 06:46:00 crc kubenswrapper[5048]: I1213 06:46:00.822087 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7cd87b778f-zgrtn" event={"ID":"72736371-ff33-45e1-a685-9e2f89dcec60","Type":"ContainerStarted","Data":"c0062586e691c33fe3fef8b7c21e8b204e6b9137dc309ab74116e317441918f8"} Dec 13 06:46:00 crc kubenswrapper[5048]: I1213 06:46:00.829020 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-fklqp" event={"ID":"201cc161-1f13-498b-b99a-0d9c91bdc15a","Type":"ContainerStarted","Data":"4e02a78ef69b0ecca935aecb3685ff91f8e5208e9211b8e4957e1b857db9a03d"} Dec 13 06:46:00 crc kubenswrapper[5048]: I1213 06:46:00.830650 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-8665b56d78-g782d" event={"ID":"6d4d57f0-ff75-455d-8e8a-fe4947b3ee40","Type":"ContainerStarted","Data":"60bc3a260ba8dbbf8daca6dcc820b906c3cfaeb5a846b55bbe1d7864f40bd321"} Dec 13 06:46:00 crc kubenswrapper[5048]: E1213 06:46:00.832761 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:d29650b006da97eb9178fcc58f2eb9fead8c2b414fac18f86a3c3a1507488c4f\\\"\"" pod="openstack-operators/placement-operator-controller-manager-8665b56d78-g782d" podUID="6d4d57f0-ff75-455d-8e8a-fe4947b3ee40" Dec 13 06:46:00 crc kubenswrapper[5048]: I1213 06:46:00.834208 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-97d456b9-2bg97" event={"ID":"89f4ffab-8c61-4389-8f41-43cd8e2d54de","Type":"ContainerStarted","Data":"7377070cfcb52432771339951df4d01bf4f4dba23c16f8cb63418f7a61e90c18"} Dec 13 06:46:00 crc kubenswrapper[5048]: E1213 06:46:00.837677 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:f27e732ec1faee765461bf137d9be81278b2fa39675019a73622755e1e610b6f\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-97d456b9-2bg97" podUID="89f4ffab-8c61-4389-8f41-43cd8e2d54de" Dec 13 06:46:01 crc kubenswrapper[5048]: E1213 06:46:01.844900 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:961417d59f527d925ac48ff6a11de747d0493315e496e34dc83d76a1a1fff58a\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-55f78b7c4c-b4f6l" podUID="7165a29e-88bf-4194-bffa-414a675d1be5" Dec 13 06:46:01 crc kubenswrapper[5048]: E1213 06:46:01.846602 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:d29650b006da97eb9178fcc58f2eb9fead8c2b414fac18f86a3c3a1507488c4f\\\"\"" pod="openstack-operators/placement-operator-controller-manager-8665b56d78-g782d" podUID="6d4d57f0-ff75-455d-8e8a-fe4947b3ee40" Dec 13 06:46:01 crc kubenswrapper[5048]: E1213 06:46:01.846652 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:3aa109bb973253ae9dcf339b9b65abbd1176cdb4be672c93e538a5f113816991\\\"\"" pod="openstack-operators/swift-operator-controller-manager-5c6df8f9-sx7sf" podUID="813f57d5-1063-4dbc-9847-b6ea97e46fbe" Dec 13 06:46:01 crc kubenswrapper[5048]: E1213 06:46:01.846693 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:635a4aef9d6f0b799e8ec91333dbb312160c001d05b3c63f614c124e0b67cb59\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-4ck8m" podUID="859bbc21-6f39-4712-a04f-4473b78b32eb" Dec 13 06:46:01 crc kubenswrapper[5048]: E1213 06:46:01.846733 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:f27e732ec1faee765461bf137d9be81278b2fa39675019a73622755e1e610b6f\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-97d456b9-2bg97" podUID="89f4ffab-8c61-4389-8f41-43cd8e2d54de" Dec 13 06:46:01 crc kubenswrapper[5048]: E1213 06:46:01.846775 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:101b3e007d8c9f2e183262d7712f986ad51256448099069bc14f1ea5f997ab94\\\"\"" pod="openstack-operators/test-operator-controller-manager-756ccf86c7-dv9ww" podUID="a046de13-f7f7-4d7c-abf3-79ed8cc60fad" Dec 13 06:46:02 crc kubenswrapper[5048]: I1213 06:46:02.033798 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d1a74609-2a57-44a5-8e88-dab8ae7fba98-cert\") pod \"infra-operator-controller-manager-7cf9bd88b6-wswvl\" (UID: \"d1a74609-2a57-44a5-8e88-dab8ae7fba98\") " pod="openstack-operators/infra-operator-controller-manager-7cf9bd88b6-wswvl" Dec 13 06:46:02 crc kubenswrapper[5048]: E1213 06:46:02.033910 5048 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Dec 13 06:46:02 crc kubenswrapper[5048]: E1213 06:46:02.034004 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d1a74609-2a57-44a5-8e88-dab8ae7fba98-cert podName:d1a74609-2a57-44a5-8e88-dab8ae7fba98 nodeName:}" 
failed. No retries permitted until 2025-12-13 06:46:06.033978291 +0000 UTC m=+999.900572932 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d1a74609-2a57-44a5-8e88-dab8ae7fba98-cert") pod "infra-operator-controller-manager-7cf9bd88b6-wswvl" (UID: "d1a74609-2a57-44a5-8e88-dab8ae7fba98") : secret "infra-operator-webhook-server-cert" not found Dec 13 06:46:02 crc kubenswrapper[5048]: I1213 06:46:02.440479 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/db14d0ba-f68b-48a5-b69a-97399548fca1-cert\") pod \"openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878\" (UID: \"db14d0ba-f68b-48a5-b69a-97399548fca1\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878" Dec 13 06:46:02 crc kubenswrapper[5048]: E1213 06:46:02.440699 5048 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 13 06:46:02 crc kubenswrapper[5048]: E1213 06:46:02.440784 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/db14d0ba-f68b-48a5-b69a-97399548fca1-cert podName:db14d0ba-f68b-48a5-b69a-97399548fca1 nodeName:}" failed. No retries permitted until 2025-12-13 06:46:06.440762706 +0000 UTC m=+1000.307357337 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/db14d0ba-f68b-48a5-b69a-97399548fca1-cert") pod "openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878" (UID: "db14d0ba-f68b-48a5-b69a-97399548fca1") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 13 06:46:02 crc kubenswrapper[5048]: I1213 06:46:02.851125 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-metrics-certs\") pod \"openstack-operator-controller-manager-58d7cfb75d-nsmnm\" (UID: \"fea58380-4304-485b-aefc-48f9baea4126\") " pod="openstack-operators/openstack-operator-controller-manager-58d7cfb75d-nsmnm" Dec 13 06:46:02 crc kubenswrapper[5048]: E1213 06:46:02.851313 5048 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 13 06:46:02 crc kubenswrapper[5048]: E1213 06:46:02.851374 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-metrics-certs podName:fea58380-4304-485b-aefc-48f9baea4126 nodeName:}" failed. No retries permitted until 2025-12-13 06:46:06.851356446 +0000 UTC m=+1000.717951027 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-metrics-certs") pod "openstack-operator-controller-manager-58d7cfb75d-nsmnm" (UID: "fea58380-4304-485b-aefc-48f9baea4126") : secret "metrics-server-cert" not found Dec 13 06:46:02 crc kubenswrapper[5048]: I1213 06:46:02.851315 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-webhook-certs\") pod \"openstack-operator-controller-manager-58d7cfb75d-nsmnm\" (UID: \"fea58380-4304-485b-aefc-48f9baea4126\") " pod="openstack-operators/openstack-operator-controller-manager-58d7cfb75d-nsmnm" Dec 13 06:46:02 crc kubenswrapper[5048]: E1213 06:46:02.851485 5048 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 13 06:46:02 crc kubenswrapper[5048]: E1213 06:46:02.851556 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-webhook-certs podName:fea58380-4304-485b-aefc-48f9baea4126 nodeName:}" failed. No retries permitted until 2025-12-13 06:46:06.851538371 +0000 UTC m=+1000.718133042 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-webhook-certs") pod "openstack-operator-controller-manager-58d7cfb75d-nsmnm" (UID: "fea58380-4304-485b-aefc-48f9baea4126") : secret "webhook-server-cert" not found Dec 13 06:46:06 crc kubenswrapper[5048]: I1213 06:46:06.107354 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d1a74609-2a57-44a5-8e88-dab8ae7fba98-cert\") pod \"infra-operator-controller-manager-7cf9bd88b6-wswvl\" (UID: \"d1a74609-2a57-44a5-8e88-dab8ae7fba98\") " pod="openstack-operators/infra-operator-controller-manager-7cf9bd88b6-wswvl" Dec 13 06:46:06 crc kubenswrapper[5048]: E1213 06:46:06.107904 5048 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Dec 13 06:46:06 crc kubenswrapper[5048]: E1213 06:46:06.107955 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d1a74609-2a57-44a5-8e88-dab8ae7fba98-cert podName:d1a74609-2a57-44a5-8e88-dab8ae7fba98 nodeName:}" failed. No retries permitted until 2025-12-13 06:46:14.107938593 +0000 UTC m=+1007.974533174 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d1a74609-2a57-44a5-8e88-dab8ae7fba98-cert") pod "infra-operator-controller-manager-7cf9bd88b6-wswvl" (UID: "d1a74609-2a57-44a5-8e88-dab8ae7fba98") : secret "infra-operator-webhook-server-cert" not found Dec 13 06:46:06 crc kubenswrapper[5048]: I1213 06:46:06.513601 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/db14d0ba-f68b-48a5-b69a-97399548fca1-cert\") pod \"openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878\" (UID: \"db14d0ba-f68b-48a5-b69a-97399548fca1\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878" Dec 13 06:46:06 crc kubenswrapper[5048]: E1213 06:46:06.513810 5048 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 13 06:46:06 crc kubenswrapper[5048]: E1213 06:46:06.513903 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/db14d0ba-f68b-48a5-b69a-97399548fca1-cert podName:db14d0ba-f68b-48a5-b69a-97399548fca1 nodeName:}" failed. No retries permitted until 2025-12-13 06:46:14.513879743 +0000 UTC m=+1008.380474324 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/db14d0ba-f68b-48a5-b69a-97399548fca1-cert") pod "openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878" (UID: "db14d0ba-f68b-48a5-b69a-97399548fca1") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 13 06:46:06 crc kubenswrapper[5048]: I1213 06:46:06.919275 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-metrics-certs\") pod \"openstack-operator-controller-manager-58d7cfb75d-nsmnm\" (UID: \"fea58380-4304-485b-aefc-48f9baea4126\") " pod="openstack-operators/openstack-operator-controller-manager-58d7cfb75d-nsmnm" Dec 13 06:46:06 crc kubenswrapper[5048]: E1213 06:46:06.919483 5048 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 13 06:46:06 crc kubenswrapper[5048]: I1213 06:46:06.919700 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-webhook-certs\") pod \"openstack-operator-controller-manager-58d7cfb75d-nsmnm\" (UID: \"fea58380-4304-485b-aefc-48f9baea4126\") " pod="openstack-operators/openstack-operator-controller-manager-58d7cfb75d-nsmnm" Dec 13 06:46:06 crc kubenswrapper[5048]: E1213 06:46:06.919751 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-metrics-certs podName:fea58380-4304-485b-aefc-48f9baea4126 nodeName:}" failed. No retries permitted until 2025-12-13 06:46:14.919732112 +0000 UTC m=+1008.786326693 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-metrics-certs") pod "openstack-operator-controller-manager-58d7cfb75d-nsmnm" (UID: "fea58380-4304-485b-aefc-48f9baea4126") : secret "metrics-server-cert" not found Dec 13 06:46:06 crc kubenswrapper[5048]: E1213 06:46:06.919827 5048 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 13 06:46:06 crc kubenswrapper[5048]: E1213 06:46:06.919881 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-webhook-certs podName:fea58380-4304-485b-aefc-48f9baea4126 nodeName:}" failed. No retries permitted until 2025-12-13 06:46:14.919863015 +0000 UTC m=+1008.786457686 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-webhook-certs") pod "openstack-operator-controller-manager-58d7cfb75d-nsmnm" (UID: "fea58380-4304-485b-aefc-48f9baea4126") : secret "webhook-server-cert" not found Dec 13 06:46:14 crc kubenswrapper[5048]: I1213 06:46:14.126694 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d1a74609-2a57-44a5-8e88-dab8ae7fba98-cert\") pod \"infra-operator-controller-manager-7cf9bd88b6-wswvl\" (UID: \"d1a74609-2a57-44a5-8e88-dab8ae7fba98\") " pod="openstack-operators/infra-operator-controller-manager-7cf9bd88b6-wswvl" Dec 13 06:46:14 crc kubenswrapper[5048]: E1213 06:46:14.126925 5048 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Dec 13 06:46:14 crc kubenswrapper[5048]: E1213 06:46:14.127363 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d1a74609-2a57-44a5-8e88-dab8ae7fba98-cert podName:d1a74609-2a57-44a5-8e88-dab8ae7fba98 nodeName:}" failed. No retries permitted until 2025-12-13 06:46:30.127341267 +0000 UTC m=+1023.993935848 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d1a74609-2a57-44a5-8e88-dab8ae7fba98-cert") pod "infra-operator-controller-manager-7cf9bd88b6-wswvl" (UID: "d1a74609-2a57-44a5-8e88-dab8ae7fba98") : secret "infra-operator-webhook-server-cert" not found Dec 13 06:46:14 crc kubenswrapper[5048]: I1213 06:46:14.533768 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/db14d0ba-f68b-48a5-b69a-97399548fca1-cert\") pod \"openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878\" (UID: \"db14d0ba-f68b-48a5-b69a-97399548fca1\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878" Dec 13 06:46:14 crc kubenswrapper[5048]: E1213 06:46:14.534064 5048 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 13 06:46:14 crc kubenswrapper[5048]: E1213 06:46:14.534254 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/db14d0ba-f68b-48a5-b69a-97399548fca1-cert podName:db14d0ba-f68b-48a5-b69a-97399548fca1 nodeName:}" failed. No retries permitted until 2025-12-13 06:46:30.534194413 +0000 UTC m=+1024.400789014 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/db14d0ba-f68b-48a5-b69a-97399548fca1-cert") pod "openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878" (UID: "db14d0ba-f68b-48a5-b69a-97399548fca1") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 13 06:46:14 crc kubenswrapper[5048]: I1213 06:46:14.939375 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-webhook-certs\") pod \"openstack-operator-controller-manager-58d7cfb75d-nsmnm\" (UID: \"fea58380-4304-485b-aefc-48f9baea4126\") " pod="openstack-operators/openstack-operator-controller-manager-58d7cfb75d-nsmnm" Dec 13 06:46:14 crc kubenswrapper[5048]: I1213 06:46:14.939542 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-metrics-certs\") pod \"openstack-operator-controller-manager-58d7cfb75d-nsmnm\" (UID: \"fea58380-4304-485b-aefc-48f9baea4126\") " pod="openstack-operators/openstack-operator-controller-manager-58d7cfb75d-nsmnm" Dec 13 06:46:14 crc kubenswrapper[5048]: E1213 06:46:14.939688 5048 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 13 06:46:14 crc kubenswrapper[5048]: E1213 06:46:14.939771 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-metrics-certs podName:fea58380-4304-485b-aefc-48f9baea4126 nodeName:}" failed. No retries permitted until 2025-12-13 06:46:30.939752483 +0000 UTC m=+1024.806347064 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-metrics-certs") pod "openstack-operator-controller-manager-58d7cfb75d-nsmnm" (UID: "fea58380-4304-485b-aefc-48f9baea4126") : secret "metrics-server-cert" not found Dec 13 06:46:14 crc kubenswrapper[5048]: I1213 06:46:14.947896 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-webhook-certs\") pod \"openstack-operator-controller-manager-58d7cfb75d-nsmnm\" (UID: \"fea58380-4304-485b-aefc-48f9baea4126\") " pod="openstack-operators/openstack-operator-controller-manager-58d7cfb75d-nsmnm" Dec 13 06:46:16 crc kubenswrapper[5048]: I1213 06:46:16.216343 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 13 06:46:16 crc kubenswrapper[5048]: I1213 06:46:16.216423 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 13 06:46:16 crc kubenswrapper[5048]: I1213 06:46:16.216516 5048 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" Dec 13 06:46:16 crc kubenswrapper[5048]: I1213 06:46:16.217242 5048 kuberuntime_manager.go:1027] 
"Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b78100e7e0c4b665e6330e2c03f1531c2cd133387b8c2c53260a2c5bf79c77e3"} pod="openshift-machine-config-operator/machine-config-daemon-j7hns" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 13 06:46:16 crc kubenswrapper[5048]: I1213 06:46:16.217300 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" containerID="cri-o://b78100e7e0c4b665e6330e2c03f1531c2cd133387b8c2c53260a2c5bf79c77e3" gracePeriod=600 Dec 13 06:46:17 crc kubenswrapper[5048]: I1213 06:46:17.966935 5048 generic.go:334] "Generic (PLEG): container finished" podID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerID="b78100e7e0c4b665e6330e2c03f1531c2cd133387b8c2c53260a2c5bf79c77e3" exitCode=0 Dec 13 06:46:17 crc kubenswrapper[5048]: I1213 06:46:17.967016 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" event={"ID":"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b","Type":"ContainerDied","Data":"b78100e7e0c4b665e6330e2c03f1531c2cd133387b8c2c53260a2c5bf79c77e3"} Dec 13 06:46:17 crc kubenswrapper[5048]: I1213 06:46:17.967290 5048 scope.go:117] "RemoveContainer" containerID="04bb8ca387b7d0e469a66283e3b83e0b6d5378cbf2f1611cc5c1df9ba125a043" Dec 13 06:46:20 crc kubenswrapper[5048]: E1213 06:46:20.590278 5048 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/horizon-operator@sha256:9e847f4dbdea19ab997f32a02b3680a9bd966f9c705911645c3866a19fda9ea5" Dec 13 06:46:20 crc kubenswrapper[5048]: E1213 06:46:20.590810 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/horizon-operator@sha256:9e847f4dbdea19ab997f32a02b3680a9bd966f9c705911645c3866a19fda9ea5,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lx7db,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-operator-controller-manager-6ccf486b9-g6ggd_openstack-operators(9bd1a72e-ad49-46ae-a748-a21d05114b84): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 13 06:46:20 crc kubenswrapper[5048]: E1213 06:46:20.592767 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/horizon-operator-controller-manager-6ccf486b9-g6ggd" podUID="9bd1a72e-ad49-46ae-a748-a21d05114b84" Dec 13 06:46:20 crc kubenswrapper[5048]: E1213 06:46:20.987652 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/horizon-operator@sha256:9e847f4dbdea19ab997f32a02b3680a9bd966f9c705911645c3866a19fda9ea5\\\"\"" pod="openstack-operators/horizon-operator-controller-manager-6ccf486b9-g6ggd" podUID="9bd1a72e-ad49-46ae-a748-a21d05114b84" Dec 13 06:46:21 crc kubenswrapper[5048]: E1213 06:46:21.278792 5048 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/cinder-operator@sha256:981b6a8f95934a86c5f10ef6e198b07265aeba7f11cf84b9ccd13dfaf06f3ca3" Dec 13 06:46:21 crc kubenswrapper[5048]: E1213 06:46:21.279016 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/cinder-operator@sha256:981b6a8f95934a86c5f10ef6e198b07265aeba7f11cf84b9ccd13dfaf06f3ca3,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-rdvm6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-operator-controller-manager-5cf45c46bd-mblxt_openstack-operators(6d515ed0-b2e1-469e-a7c5-bbe62664979e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 13 06:46:21 crc kubenswrapper[5048]: E1213 06:46:21.280180 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/cinder-operator-controller-manager-5cf45c46bd-mblxt" podUID="6d515ed0-b2e1-469e-a7c5-bbe62664979e" Dec 13 06:46:21 crc kubenswrapper[5048]: E1213 06:46:21.994625 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/cinder-operator@sha256:981b6a8f95934a86c5f10ef6e198b07265aeba7f11cf84b9ccd13dfaf06f3ca3\\\"\"" pod="openstack-operators/cinder-operator-controller-manager-5cf45c46bd-mblxt" podUID="6d515ed0-b2e1-469e-a7c5-bbe62664979e" Dec 13 06:46:22 crc kubenswrapper[5048]: E1213 06:46:22.988260 5048 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/manila-operator@sha256:44126f9c6b1d2bf752ddf989e20a4fc4cc1c07723d4fcb78465ccb2f55da6b3a" Dec 13 06:46:22 crc kubenswrapper[5048]: E1213 06:46:22.988495 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:44126f9c6b1d2bf752ddf989e20a4fc4cc1c07723d4fcb78465ccb2f55da6b3a,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5blm4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-5fdd9786f7-mkjnl_openstack-operators(c58196eb-9b95-450a-98ff-d852ff7125c5): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 13 06:46:22 crc kubenswrapper[5048]: E1213 06:46:22.990065 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/manila-operator-controller-manager-5fdd9786f7-mkjnl" podUID="c58196eb-9b95-450a-98ff-d852ff7125c5" Dec 13 06:46:22 crc kubenswrapper[5048]: E1213 06:46:22.998074 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:44126f9c6b1d2bf752ddf989e20a4fc4cc1c07723d4fcb78465ccb2f55da6b3a\\\"\"" pod="openstack-operators/manila-operator-controller-manager-5fdd9786f7-mkjnl" podUID="c58196eb-9b95-450a-98ff-d852ff7125c5" Dec 13 06:46:23 crc kubenswrapper[5048]: E1213 06:46:23.457549 5048 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2" Dec 13 06:46:23 crc kubenswrapper[5048]: E1213 06:46:23.457711 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-rk292,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-fvxqc_openstack-operators(4b1fbe4c-27c1-4456-bf01-6a42320cb63d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 13 06:46:23 crc kubenswrapper[5048]: E1213 06:46:23.459752 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fvxqc" podUID="4b1fbe4c-27c1-4456-bf01-6a42320cb63d" Dec 13 06:46:23 crc kubenswrapper[5048]: E1213 06:46:23.921496 5048 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/barbican-operator@sha256:f6059a0fbf031d34dcf086d14ce8c0546caeaee23c5780e90b5037c5feee9fea" Dec 13 06:46:23 crc kubenswrapper[5048]: E1213 06:46:23.922119 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/barbican-operator@sha256:f6059a0fbf031d34dcf086d14ce8c0546caeaee23c5780e90b5037c5feee9fea,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-7r8lk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-operator-controller-manager-95949466-xmb6d_openstack-operators(50877cb8-07a0-48c4-af3d-72144fa836e0): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 13 06:46:23 crc kubenswrapper[5048]: E1213 06:46:23.923381 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/barbican-operator-controller-manager-95949466-xmb6d" podUID="50877cb8-07a0-48c4-af3d-72144fa836e0" Dec 13 06:46:24 crc kubenswrapper[5048]: E1213 06:46:24.004091 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/barbican-operator@sha256:f6059a0fbf031d34dcf086d14ce8c0546caeaee23c5780e90b5037c5feee9fea\\\"\"" pod="openstack-operators/barbican-operator-controller-manager-95949466-xmb6d" podUID="50877cb8-07a0-48c4-af3d-72144fa836e0" Dec 13 06:46:24 crc kubenswrapper[5048]: E1213 06:46:24.004333 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fvxqc" 
podUID="4b1fbe4c-27c1-4456-bf01-6a42320cb63d" Dec 13 06:46:24 crc kubenswrapper[5048]: E1213 06:46:24.603808 5048 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/designate-operator@sha256:900050d3501c0785b227db34b89883efe68247816e5c7427cacb74f8aa10605a" Dec 13 06:46:24 crc kubenswrapper[5048]: E1213 06:46:24.604087 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/designate-operator@sha256:900050d3501c0785b227db34b89883efe68247816e5c7427cacb74f8aa10605a,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-kqpvp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod designate-operator-controller-manager-66f8b87655-ll9d4_openstack-operators(83f0165e-ed2b-436c-9ae0-e871bd291638): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 13 06:46:24 crc kubenswrapper[5048]: E1213 06:46:24.606456 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/designate-operator-controller-manager-66f8b87655-ll9d4" podUID="83f0165e-ed2b-436c-9ae0-e871bd291638" Dec 13 06:46:25 crc kubenswrapper[5048]: E1213 06:46:25.011171 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: 
\"Back-off pulling image \\\"quay.io/openstack-k8s-operators/designate-operator@sha256:900050d3501c0785b227db34b89883efe68247816e5c7427cacb74f8aa10605a\\\"\"" pod="openstack-operators/designate-operator-controller-manager-66f8b87655-ll9d4" podUID="83f0165e-ed2b-436c-9ae0-e871bd291638" Dec 13 06:46:25 crc kubenswrapper[5048]: E1213 06:46:25.427491 5048 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/nova-operator@sha256:779f0cee6024d0fb8f259b036fe790e62aa5a3b0431ea9bf15a6e7d02e2e5670" Dec 13 06:46:25 crc kubenswrapper[5048]: E1213 06:46:25.427693 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:779f0cee6024d0fb8f259b036fe790e62aa5a3b0431ea9bf15a6e7d02e2e5670,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jx9bv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-5fbbf8b6cc-fklqp_openstack-operators(201cc161-1f13-498b-b99a-0d9c91bdc15a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 13 06:46:25 crc kubenswrapper[5048]: E1213 06:46:25.428919 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-fklqp" 
podUID="201cc161-1f13-498b-b99a-0d9c91bdc15a" Dec 13 06:46:26 crc kubenswrapper[5048]: E1213 06:46:26.016548 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:779f0cee6024d0fb8f259b036fe790e62aa5a3b0431ea9bf15a6e7d02e2e5670\\\"\"" pod="openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-fklqp" podUID="201cc161-1f13-498b-b99a-0d9c91bdc15a" Dec 13 06:46:30 crc kubenswrapper[5048]: I1213 06:46:30.213733 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d1a74609-2a57-44a5-8e88-dab8ae7fba98-cert\") pod \"infra-operator-controller-manager-7cf9bd88b6-wswvl\" (UID: \"d1a74609-2a57-44a5-8e88-dab8ae7fba98\") " pod="openstack-operators/infra-operator-controller-manager-7cf9bd88b6-wswvl" Dec 13 06:46:30 crc kubenswrapper[5048]: I1213 06:46:30.219426 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d1a74609-2a57-44a5-8e88-dab8ae7fba98-cert\") pod \"infra-operator-controller-manager-7cf9bd88b6-wswvl\" (UID: \"d1a74609-2a57-44a5-8e88-dab8ae7fba98\") " pod="openstack-operators/infra-operator-controller-manager-7cf9bd88b6-wswvl" Dec 13 06:46:30 crc kubenswrapper[5048]: I1213 06:46:30.501556 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-pnzfn" Dec 13 06:46:30 crc kubenswrapper[5048]: I1213 06:46:30.508916 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-7cf9bd88b6-wswvl" Dec 13 06:46:30 crc kubenswrapper[5048]: I1213 06:46:30.619575 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/db14d0ba-f68b-48a5-b69a-97399548fca1-cert\") pod \"openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878\" (UID: \"db14d0ba-f68b-48a5-b69a-97399548fca1\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878" Dec 13 06:46:30 crc kubenswrapper[5048]: I1213 06:46:30.623769 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/db14d0ba-f68b-48a5-b69a-97399548fca1-cert\") pod \"openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878\" (UID: \"db14d0ba-f68b-48a5-b69a-97399548fca1\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878" Dec 13 06:46:30 crc kubenswrapper[5048]: I1213 06:46:30.863077 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-f5g7n" Dec 13 06:46:30 crc kubenswrapper[5048]: I1213 06:46:30.871530 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878" Dec 13 06:46:31 crc kubenswrapper[5048]: I1213 06:46:31.024820 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-metrics-certs\") pod \"openstack-operator-controller-manager-58d7cfb75d-nsmnm\" (UID: \"fea58380-4304-485b-aefc-48f9baea4126\") " pod="openstack-operators/openstack-operator-controller-manager-58d7cfb75d-nsmnm" Dec 13 06:46:31 crc kubenswrapper[5048]: I1213 06:46:31.028391 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fea58380-4304-485b-aefc-48f9baea4126-metrics-certs\") pod \"openstack-operator-controller-manager-58d7cfb75d-nsmnm\" (UID: \"fea58380-4304-485b-aefc-48f9baea4126\") " pod="openstack-operators/openstack-operator-controller-manager-58d7cfb75d-nsmnm" Dec 13 06:46:31 crc kubenswrapper[5048]: I1213 06:46:31.260499 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-44wdw" Dec 13 06:46:31 crc kubenswrapper[5048]: I1213 06:46:31.268675 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-58d7cfb75d-nsmnm" Dec 13 06:46:33 crc kubenswrapper[5048]: E1213 06:46:33.737048 5048 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/test-operator@sha256:101b3e007d8c9f2e183262d7712f986ad51256448099069bc14f1ea5f997ab94" Dec 13 06:46:33 crc kubenswrapper[5048]: E1213 06:46:33.737975 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:101b3e007d8c9f2e183262d7712f986ad51256448099069bc14f1ea5f997ab94,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-7rbdb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-756ccf86c7-dv9ww_openstack-operators(a046de13-f7f7-4d7c-abf3-79ed8cc60fad): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 13 06:46:33 crc kubenswrapper[5048]: E1213 06:46:33.739216 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/test-operator-controller-manager-756ccf86c7-dv9ww" podUID="a046de13-f7f7-4d7c-abf3-79ed8cc60fad" Dec 13 06:46:34 crc kubenswrapper[5048]: E1213 06:46:34.407771 5048 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/watcher-operator@sha256:961417d59f527d925ac48ff6a11de747d0493315e496e34dc83d76a1a1fff58a" Dec 13 06:46:34 crc kubenswrapper[5048]: E1213 06:46:34.408648 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:961417d59f527d925ac48ff6a11de747d0493315e496e34dc83d76a1a1fff58a,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-cxqrd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-55f78b7c4c-b4f6l_openstack-operators(7165a29e-88bf-4194-bffa-414a675d1be5): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 13 06:46:34 crc kubenswrapper[5048]: E1213 06:46:34.410074 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/watcher-operator-controller-manager-55f78b7c4c-b4f6l" podUID="7165a29e-88bf-4194-bffa-414a675d1be5" Dec 13 06:46:35 crc kubenswrapper[5048]: I1213 06:46:35.570225 5048 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 13 06:46:36 crc kubenswrapper[5048]: E1213 06:46:36.013752 5048 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/telemetry-operator@sha256:f27e732ec1faee765461bf137d9be81278b2fa39675019a73622755e1e610b6f" Dec 13 06:46:36 crc kubenswrapper[5048]: E1213 06:46:36.013962 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:f27e732ec1faee765461bf137d9be81278b2fa39675019a73622755e1e610b6f,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-7fsm4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-97d456b9-2bg97_openstack-operators(89f4ffab-8c61-4389-8f41-43cd8e2d54de): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 13 06:46:36 crc kubenswrapper[5048]: E1213 06:46:36.015515 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/telemetry-operator-controller-manager-97d456b9-2bg97" podUID="89f4ffab-8c61-4389-8f41-43cd8e2d54de" Dec 13 06:46:36 crc kubenswrapper[5048]: E1213 06:46:36.636702 5048 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:72ad6517987f674af0d0ae092cbb874aeae909c8b8b60188099c311762ebc8f7" Dec 13 06:46:36 crc kubenswrapper[5048]: E1213 06:46:36.636899 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:72ad6517987f674af0d0ae092cbb874aeae909c8b8b60188099c311762ebc8f7,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-fdnjj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-5c7cbf548f-fdf2l_openstack-operators(148d1633-90ad-45da-af0b-5b182ee41795): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 13 06:46:36 crc kubenswrapper[5048]: E1213 06:46:36.638074 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-5c7cbf548f-fdf2l" podUID="148d1633-90ad-45da-af0b-5b182ee41795" Dec 13 06:46:37 crc kubenswrapper[5048]: E1213 06:46:37.097602 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:72ad6517987f674af0d0ae092cbb874aeae909c8b8b60188099c311762ebc8f7\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-5c7cbf548f-fdf2l" podUID="148d1633-90ad-45da-af0b-5b182ee41795" Dec 13 06:46:38 crc kubenswrapper[5048]: I1213 06:46:38.580988 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-7cf9bd88b6-wswvl"] Dec 13 06:46:38 crc kubenswrapper[5048]: I1213 06:46:38.651668 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-58d7cfb75d-nsmnm"] Dec 13 06:46:38 crc kubenswrapper[5048]: I1213 06:46:38.682271 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878"] Dec 13 06:46:39 crc kubenswrapper[5048]: I1213 06:46:39.123362 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-f458558d7-xxj48" event={"ID":"b47e0c4b-dd06-4d55-b3d7-8e3d968df8e6","Type":"ContainerStarted","Data":"4e943cb10455bae9edeef255266412953f5db38290714db25d95e6522f072028"} Dec 13 06:46:39 crc kubenswrapper[5048]: I1213 06:46:39.123686 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-f458558d7-xxj48" Dec 13 06:46:39 crc kubenswrapper[5048]: I1213 06:46:39.143610 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-f458558d7-xxj48" podStartSLOduration=9.570609892 podStartE2EDuration="41.143592827s" podCreationTimestamp="2025-12-13 06:45:58 +0000 UTC" firstStartedPulling="2025-12-13 06:45:59.689675343 +0000 UTC m=+993.556269924" lastFinishedPulling="2025-12-13 06:46:31.262658278 +0000 UTC m=+1025.129252859" 
observedRunningTime="2025-12-13 06:46:39.142765644 +0000 UTC m=+1033.009360225" watchObservedRunningTime="2025-12-13 06:46:39.143592827 +0000 UTC m=+1033.010187428" Dec 13 06:46:39 crc kubenswrapper[5048]: I1213 06:46:39.144140 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-767f9d7567-md4jk" event={"ID":"4d953dd1-d113-4dc6-a80b-3ded9d08b476","Type":"ContainerStarted","Data":"8ccbf19ddb78f07ad45382dda09b73f5bd5e44b48827070a997417ac73e17352"} Dec 13 06:46:39 crc kubenswrapper[5048]: I1213 06:46:39.144396 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-767f9d7567-md4jk" Dec 13 06:46:39 crc kubenswrapper[5048]: I1213 06:46:39.146383 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7cd87b778f-zgrtn" event={"ID":"72736371-ff33-45e1-a685-9e2f89dcec60","Type":"ContainerStarted","Data":"509c47fee741444acefefef801df43416c7b5dcba4a1882b14fb15e54dc743ea"} Dec 13 06:46:39 crc kubenswrapper[5048]: I1213 06:46:39.146971 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-7cd87b778f-zgrtn" Dec 13 06:46:39 crc kubenswrapper[5048]: I1213 06:46:39.148696 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-f76f4954c-bpgwj" event={"ID":"1994b961-1801-4279-8c61-a901803b4a3a","Type":"ContainerStarted","Data":"398882e3557278111271b470f5d9be080055e2dcd397f87560e427ac18c13d09"} Dec 13 06:46:39 crc kubenswrapper[5048]: I1213 06:46:39.149149 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-f76f4954c-bpgwj" Dec 13 06:46:39 crc kubenswrapper[5048]: I1213 06:46:39.150263 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-59b8dcb766-sn85z" event={"ID":"07f258ba-5ff1-4d34-8e07-62c024c15dba","Type":"ContainerStarted","Data":"7725c51d3c8f86556c1fcaced83e09e4196796cd652ced22c2f764605eaa21dc"} Dec 13 06:46:39 crc kubenswrapper[5048]: I1213 06:46:39.150665 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-59b8dcb766-sn85z" Dec 13 06:46:39 crc kubenswrapper[5048]: I1213 06:46:39.151833 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-68c649d9d-gqsj6" event={"ID":"659a6b7b-7ef9-4fc2-8ea4-4298020aa94c","Type":"ContainerStarted","Data":"de1955b9f860e67dd0a2e5eba3a01d43348d42a7549b19f8c46cb7feada52702"} Dec 13 06:46:39 crc kubenswrapper[5048]: I1213 06:46:39.152241 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-68c649d9d-gqsj6" Dec 13 06:46:39 crc kubenswrapper[5048]: I1213 06:46:39.175348 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" event={"ID":"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b","Type":"ContainerStarted","Data":"3dc134f93584bad71d64c1b4cfe7bce9b820cc019159e00fde72b17e966595a4"} Dec 13 06:46:39 crc kubenswrapper[5048]: I1213 06:46:39.179198 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-767f9d7567-md4jk" podStartSLOduration=16.027711419 
podStartE2EDuration="41.179172635s" podCreationTimestamp="2025-12-13 06:45:58 +0000 UTC" firstStartedPulling="2025-12-13 06:45:59.436170915 +0000 UTC m=+993.302765496" lastFinishedPulling="2025-12-13 06:46:24.587632141 +0000 UTC m=+1018.454226712" observedRunningTime="2025-12-13 06:46:39.17468442 +0000 UTC m=+1033.041279011" watchObservedRunningTime="2025-12-13 06:46:39.179172635 +0000 UTC m=+1033.045767216" Dec 13 06:46:39 crc kubenswrapper[5048]: I1213 06:46:39.190182 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-5c6df8f9-sx7sf" event={"ID":"813f57d5-1063-4dbc-9847-b6ea97e46fbe","Type":"ContainerStarted","Data":"566b27dc153a47e5c6f6aaf3d6d3424d2f8243f2a07274d02f679ee39ce757ca"} Dec 13 06:46:39 crc kubenswrapper[5048]: I1213 06:46:39.196180 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-8665b56d78-g782d" event={"ID":"6d4d57f0-ff75-455d-8e8a-fe4947b3ee40","Type":"ContainerStarted","Data":"9e345ab98ac03575cbf856241b31ccb90682576e0980a3b5669bbc75308baaac"} Dec 13 06:46:39 crc kubenswrapper[5048]: I1213 06:46:39.196877 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-8665b56d78-g782d" Dec 13 06:46:39 crc kubenswrapper[5048]: I1213 06:46:39.205653 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-59b8dcb766-sn85z" podStartSLOduration=15.97119642 podStartE2EDuration="41.205623939s" podCreationTimestamp="2025-12-13 06:45:58 +0000 UTC" firstStartedPulling="2025-12-13 06:45:59.353212352 +0000 UTC m=+993.219806933" lastFinishedPulling="2025-12-13 06:46:24.587639871 +0000 UTC m=+1018.454234452" observedRunningTime="2025-12-13 06:46:39.19667109 +0000 UTC m=+1033.063265691" watchObservedRunningTime="2025-12-13 06:46:39.205623939 +0000 UTC m=+1033.072218520" Dec 13 06:46:39 crc kubenswrapper[5048]: I1213 06:46:39.254023 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-f76f4954c-bpgwj" podStartSLOduration=16.560464681 podStartE2EDuration="41.253998882s" podCreationTimestamp="2025-12-13 06:45:58 +0000 UTC" firstStartedPulling="2025-12-13 06:45:59.8941149 +0000 UTC m=+993.760709481" lastFinishedPulling="2025-12-13 06:46:24.587649101 +0000 UTC m=+1018.454243682" observedRunningTime="2025-12-13 06:46:39.24096899 +0000 UTC m=+1033.107563601" watchObservedRunningTime="2025-12-13 06:46:39.253998882 +0000 UTC m=+1033.120593483" Dec 13 06:46:39 crc kubenswrapper[5048]: I1213 06:46:39.265055 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-7cd87b778f-zgrtn" podStartSLOduration=15.761082197 podStartE2EDuration="41.265028848s" podCreationTimestamp="2025-12-13 06:45:58 +0000 UTC" firstStartedPulling="2025-12-13 06:45:59.896816095 +0000 UTC m=+993.763410676" lastFinishedPulling="2025-12-13 06:46:25.400762746 +0000 UTC m=+1019.267357327" observedRunningTime="2025-12-13 06:46:39.260086882 +0000 UTC m=+1033.126681473" watchObservedRunningTime="2025-12-13 06:46:39.265028848 +0000 UTC m=+1033.131623439" Dec 13 06:46:39 crc kubenswrapper[5048]: I1213 06:46:39.290191 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-68c649d9d-gqsj6" podStartSLOduration=16.853165757 
podStartE2EDuration="41.290168546s" podCreationTimestamp="2025-12-13 06:45:58 +0000 UTC" firstStartedPulling="2025-12-13 06:46:00.150618891 +0000 UTC m=+994.017213462" lastFinishedPulling="2025-12-13 06:46:24.58762167 +0000 UTC m=+1018.454216251" observedRunningTime="2025-12-13 06:46:39.274712097 +0000 UTC m=+1033.141306688" watchObservedRunningTime="2025-12-13 06:46:39.290168546 +0000 UTC m=+1033.156763127" Dec 13 06:46:39 crc kubenswrapper[5048]: I1213 06:46:39.337036 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-8665b56d78-g782d" podStartSLOduration=4.533971984 podStartE2EDuration="41.337021188s" podCreationTimestamp="2025-12-13 06:45:58 +0000 UTC" firstStartedPulling="2025-12-13 06:46:00.197166654 +0000 UTC m=+994.063761235" lastFinishedPulling="2025-12-13 06:46:37.000215828 +0000 UTC m=+1030.866810439" observedRunningTime="2025-12-13 06:46:39.334280932 +0000 UTC m=+1033.200875523" watchObservedRunningTime="2025-12-13 06:46:39.337021188 +0000 UTC m=+1033.203615769" Dec 13 06:46:39 crc kubenswrapper[5048]: W1213 06:46:39.700314 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfea58380_4304_485b_aefc_48f9baea4126.slice/crio-d4e639d9023988161b165a8c28892417f4d9e4c89235c4208345aefdd3941dc1 WatchSource:0}: Error finding container d4e639d9023988161b165a8c28892417f4d9e4c89235c4208345aefdd3941dc1: Status 404 returned error can't find the container with id d4e639d9023988161b165a8c28892417f4d9e4c89235c4208345aefdd3941dc1 Dec 13 06:46:39 crc kubenswrapper[5048]: W1213 06:46:39.701161 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddb14d0ba_f68b_48a5_b69a_97399548fca1.slice/crio-0730bca972d402066d7971cf48b0885b1e99c1e17f1ae010a71f8fe255b8671b WatchSource:0}: Error finding container 0730bca972d402066d7971cf48b0885b1e99c1e17f1ae010a71f8fe255b8671b: Status 404 returned error can't find the container with id 0730bca972d402066d7971cf48b0885b1e99c1e17f1ae010a71f8fe255b8671b Dec 13 06:46:40 crc kubenswrapper[5048]: I1213 06:46:40.210970 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-5cf45c46bd-mblxt" event={"ID":"6d515ed0-b2e1-469e-a7c5-bbe62664979e","Type":"ContainerStarted","Data":"56822ebffc0e81f484fad96748cdab4bf07eb0cf737764ebaee26aa88616cf23"} Dec 13 06:46:40 crc kubenswrapper[5048]: I1213 06:46:40.212265 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-5cf45c46bd-mblxt" Dec 13 06:46:40 crc kubenswrapper[5048]: I1213 06:46:40.213558 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878" event={"ID":"db14d0ba-f68b-48a5-b69a-97399548fca1","Type":"ContainerStarted","Data":"0730bca972d402066d7971cf48b0885b1e99c1e17f1ae010a71f8fe255b8671b"} Dec 13 06:46:40 crc kubenswrapper[5048]: I1213 06:46:40.218498 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-4ck8m" event={"ID":"859bbc21-6f39-4712-a04f-4473b78b32eb","Type":"ContainerStarted","Data":"57daa2d8371797dfd322bf532028f191bbdfe3170c8ae9001478d792f784fa5b"} Dec 13 06:46:40 crc kubenswrapper[5048]: I1213 06:46:40.218818 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-4ck8m" Dec 13 06:46:40 crc kubenswrapper[5048]: I1213 06:46:40.220094 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-58d7cfb75d-nsmnm" event={"ID":"fea58380-4304-485b-aefc-48f9baea4126","Type":"ContainerStarted","Data":"d4e639d9023988161b165a8c28892417f4d9e4c89235c4208345aefdd3941dc1"} Dec 13 06:46:40 crc kubenswrapper[5048]: I1213 06:46:40.221582 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-7cf9bd88b6-wswvl" event={"ID":"d1a74609-2a57-44a5-8e88-dab8ae7fba98","Type":"ContainerStarted","Data":"cd483608ed8dfcfb7c3d737e48c871c8f8805865bf624c1d3b6f4bdc3dff6d64"} Dec 13 06:46:40 crc kubenswrapper[5048]: I1213 06:46:40.231824 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-5cf45c46bd-mblxt" podStartSLOduration=4.392921724 podStartE2EDuration="43.231802161s" podCreationTimestamp="2025-12-13 06:45:57 +0000 UTC" firstStartedPulling="2025-12-13 06:45:59.348209753 +0000 UTC m=+993.214804334" lastFinishedPulling="2025-12-13 06:46:38.18709017 +0000 UTC m=+1032.053684771" observedRunningTime="2025-12-13 06:46:40.225811665 +0000 UTC m=+1034.092406266" watchObservedRunningTime="2025-12-13 06:46:40.231802161 +0000 UTC m=+1034.098396742" Dec 13 06:46:40 crc kubenswrapper[5048]: I1213 06:46:40.242342 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-5c6df8f9-sx7sf" podStartSLOduration=5.370765815 podStartE2EDuration="42.242326012s" podCreationTimestamp="2025-12-13 06:45:58 +0000 UTC" firstStartedPulling="2025-12-13 06:46:00.170331968 +0000 UTC m=+994.036926539" lastFinishedPulling="2025-12-13 06:46:37.041892145 +0000 UTC m=+1030.908486736" observedRunningTime="2025-12-13 06:46:40.241399237 +0000 UTC m=+1034.107993838" watchObservedRunningTime="2025-12-13 06:46:40.242326012 +0000 UTC m=+1034.108920593" Dec 13 06:46:41 crc kubenswrapper[5048]: I1213 06:46:41.246792 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-6ccf486b9-g6ggd" event={"ID":"9bd1a72e-ad49-46ae-a748-a21d05114b84","Type":"ContainerStarted","Data":"b7720b512c5a594e781bb18a63b208171e479e0b71a47c320ba513e6011e50c4"} Dec 13 06:46:41 crc kubenswrapper[5048]: I1213 06:46:41.247754 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-6ccf486b9-g6ggd" Dec 13 06:46:41 crc kubenswrapper[5048]: I1213 06:46:41.249190 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-fklqp" event={"ID":"201cc161-1f13-498b-b99a-0d9c91bdc15a","Type":"ContainerStarted","Data":"83b4eb7972976617388a2b98e060801aaf596583b7801864e0e25074712c507f"} Dec 13 06:46:41 crc kubenswrapper[5048]: I1213 06:46:41.249554 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-fklqp" Dec 13 06:46:41 crc kubenswrapper[5048]: I1213 06:46:41.252528 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fvxqc" event={"ID":"4b1fbe4c-27c1-4456-bf01-6a42320cb63d","Type":"ContainerStarted","Data":"e8eff491e77a476db5791b07b5ed263e7a4f239458d8aef1c530756fe3488d03"} Dec 13 
06:46:41 crc kubenswrapper[5048]: I1213 06:46:41.254043 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-66f8b87655-ll9d4" event={"ID":"83f0165e-ed2b-436c-9ae0-e871bd291638","Type":"ContainerStarted","Data":"7d3a8d019a9cd5f70c364a833eee8d1a388d8ce6030cfb75815a0860a0b407a2"} Dec 13 06:46:41 crc kubenswrapper[5048]: I1213 06:46:41.254407 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-66f8b87655-ll9d4" Dec 13 06:46:41 crc kubenswrapper[5048]: I1213 06:46:41.255455 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-95949466-xmb6d" event={"ID":"50877cb8-07a0-48c4-af3d-72144fa836e0","Type":"ContainerStarted","Data":"64cf3b674b911447f7024fdbe1d6d0ac2f685cb9072df47d6eeef265b5212e2e"} Dec 13 06:46:41 crc kubenswrapper[5048]: I1213 06:46:41.255876 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-95949466-xmb6d" Dec 13 06:46:41 crc kubenswrapper[5048]: I1213 06:46:41.257859 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5fdd9786f7-mkjnl" event={"ID":"c58196eb-9b95-450a-98ff-d852ff7125c5","Type":"ContainerStarted","Data":"2bda5e2e8a60ac5305dbb657aad2851fdc3dcfaf97d9e23a5c6993f7a21865b6"} Dec 13 06:46:41 crc kubenswrapper[5048]: I1213 06:46:41.258179 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5fdd9786f7-mkjnl" Dec 13 06:46:41 crc kubenswrapper[5048]: I1213 06:46:41.260749 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-58d7cfb75d-nsmnm" event={"ID":"fea58380-4304-485b-aefc-48f9baea4126","Type":"ContainerStarted","Data":"c6281fc3673e4889f6f2ff2dca321fd015745be5d75dc6566c865849e4c9f18a"} Dec 13 06:46:41 crc kubenswrapper[5048]: I1213 06:46:41.260794 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-58d7cfb75d-nsmnm" Dec 13 06:46:41 crc kubenswrapper[5048]: I1213 06:46:41.268147 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-4ck8m" podStartSLOduration=6.455646707 podStartE2EDuration="43.268127623s" podCreationTimestamp="2025-12-13 06:45:58 +0000 UTC" firstStartedPulling="2025-12-13 06:46:00.187764153 +0000 UTC m=+994.054358734" lastFinishedPulling="2025-12-13 06:46:37.000244999 +0000 UTC m=+1030.866839650" observedRunningTime="2025-12-13 06:46:40.260208679 +0000 UTC m=+1034.126803260" watchObservedRunningTime="2025-12-13 06:46:41.268127623 +0000 UTC m=+1035.134722204" Dec 13 06:46:41 crc kubenswrapper[5048]: I1213 06:46:41.269531 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-6ccf486b9-g6ggd" podStartSLOduration=3.068418703 podStartE2EDuration="43.269524362s" podCreationTimestamp="2025-12-13 06:45:58 +0000 UTC" firstStartedPulling="2025-12-13 06:45:59.616362518 +0000 UTC m=+993.482957099" lastFinishedPulling="2025-12-13 06:46:39.817468177 +0000 UTC m=+1033.684062758" observedRunningTime="2025-12-13 06:46:41.264342628 +0000 UTC m=+1035.130937229" watchObservedRunningTime="2025-12-13 06:46:41.269524362 +0000 UTC m=+1035.136118943" Dec 
13 06:46:41 crc kubenswrapper[5048]: I1213 06:46:41.288203 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fvxqc" podStartSLOduration=3.616260314 podStartE2EDuration="43.28818217s" podCreationTimestamp="2025-12-13 06:45:58 +0000 UTC" firstStartedPulling="2025-12-13 06:46:00.165729961 +0000 UTC m=+994.032324542" lastFinishedPulling="2025-12-13 06:46:39.837651817 +0000 UTC m=+1033.704246398" observedRunningTime="2025-12-13 06:46:41.277797852 +0000 UTC m=+1035.144392453" watchObservedRunningTime="2025-12-13 06:46:41.28818217 +0000 UTC m=+1035.154776751" Dec 13 06:46:41 crc kubenswrapper[5048]: I1213 06:46:41.307585 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-66f8b87655-ll9d4" podStartSLOduration=5.836513993 podStartE2EDuration="44.307565978s" podCreationTimestamp="2025-12-13 06:45:57 +0000 UTC" firstStartedPulling="2025-12-13 06:45:59.719251105 +0000 UTC m=+993.585845686" lastFinishedPulling="2025-12-13 06:46:38.19030309 +0000 UTC m=+1032.056897671" observedRunningTime="2025-12-13 06:46:41.304617926 +0000 UTC m=+1035.171212517" watchObservedRunningTime="2025-12-13 06:46:41.307565978 +0000 UTC m=+1035.174160569" Dec 13 06:46:41 crc kubenswrapper[5048]: I1213 06:46:41.327551 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-fklqp" podStartSLOduration=3.654368572 podStartE2EDuration="43.327527432s" podCreationTimestamp="2025-12-13 06:45:58 +0000 UTC" firstStartedPulling="2025-12-13 06:46:00.146030394 +0000 UTC m=+994.012624975" lastFinishedPulling="2025-12-13 06:46:39.819189254 +0000 UTC m=+1033.685783835" observedRunningTime="2025-12-13 06:46:41.326259107 +0000 UTC m=+1035.192853698" watchObservedRunningTime="2025-12-13 06:46:41.327527432 +0000 UTC m=+1035.194122013" Dec 13 06:46:41 crc kubenswrapper[5048]: I1213 06:46:41.344798 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-5fdd9786f7-mkjnl" podStartSLOduration=3.279779143 podStartE2EDuration="43.344780952s" podCreationTimestamp="2025-12-13 06:45:58 +0000 UTC" firstStartedPulling="2025-12-13 06:45:59.883693371 +0000 UTC m=+993.750287942" lastFinishedPulling="2025-12-13 06:46:39.94869517 +0000 UTC m=+1033.815289751" observedRunningTime="2025-12-13 06:46:41.342817867 +0000 UTC m=+1035.209412458" watchObservedRunningTime="2025-12-13 06:46:41.344780952 +0000 UTC m=+1035.211375533" Dec 13 06:46:41 crc kubenswrapper[5048]: I1213 06:46:41.370526 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-58d7cfb75d-nsmnm" podStartSLOduration=43.370509486 podStartE2EDuration="43.370509486s" podCreationTimestamp="2025-12-13 06:45:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:46:41.365750034 +0000 UTC m=+1035.232344625" watchObservedRunningTime="2025-12-13 06:46:41.370509486 +0000 UTC m=+1035.237104057" Dec 13 06:46:44 crc kubenswrapper[5048]: I1213 06:46:44.289184 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878" 
event={"ID":"db14d0ba-f68b-48a5-b69a-97399548fca1","Type":"ContainerStarted","Data":"d7edb2238bb562f70039674ba485861fe769cf69cb8cea78dd216de8d0cba13c"} Dec 13 06:46:44 crc kubenswrapper[5048]: I1213 06:46:44.291233 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-7cf9bd88b6-wswvl" event={"ID":"d1a74609-2a57-44a5-8e88-dab8ae7fba98","Type":"ContainerStarted","Data":"fbb18efc1a69ecf9262b9e7432de04a2af40cc3b470d9fd4df9ea6e60759ede9"} Dec 13 06:46:44 crc kubenswrapper[5048]: I1213 06:46:44.291382 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878" Dec 13 06:46:44 crc kubenswrapper[5048]: I1213 06:46:44.291578 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-7cf9bd88b6-wswvl" Dec 13 06:46:44 crc kubenswrapper[5048]: I1213 06:46:44.326042 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878" podStartSLOduration=42.546638232 podStartE2EDuration="46.326016704s" podCreationTimestamp="2025-12-13 06:45:58 +0000 UTC" firstStartedPulling="2025-12-13 06:46:39.819367319 +0000 UTC m=+1033.685961890" lastFinishedPulling="2025-12-13 06:46:43.598745771 +0000 UTC m=+1037.465340362" observedRunningTime="2025-12-13 06:46:44.316320445 +0000 UTC m=+1038.182915066" watchObservedRunningTime="2025-12-13 06:46:44.326016704 +0000 UTC m=+1038.192611315" Dec 13 06:46:44 crc kubenswrapper[5048]: I1213 06:46:44.327693 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-95949466-xmb6d" podStartSLOduration=7.240964257 podStartE2EDuration="47.32768348s" podCreationTimestamp="2025-12-13 06:45:57 +0000 UTC" firstStartedPulling="2025-12-13 06:45:59.731968188 +0000 UTC m=+993.598562769" lastFinishedPulling="2025-12-13 06:46:39.818687411 +0000 UTC m=+1033.685281992" observedRunningTime="2025-12-13 06:46:41.3954934 +0000 UTC m=+1035.262088001" watchObservedRunningTime="2025-12-13 06:46:44.32768348 +0000 UTC m=+1038.194278101" Dec 13 06:46:44 crc kubenswrapper[5048]: I1213 06:46:44.337375 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-7cf9bd88b6-wswvl" podStartSLOduration=42.556447244 podStartE2EDuration="46.337352909s" podCreationTimestamp="2025-12-13 06:45:58 +0000 UTC" firstStartedPulling="2025-12-13 06:46:39.819344159 +0000 UTC m=+1033.685938740" lastFinishedPulling="2025-12-13 06:46:43.600249824 +0000 UTC m=+1037.466844405" observedRunningTime="2025-12-13 06:46:44.331326592 +0000 UTC m=+1038.197921193" watchObservedRunningTime="2025-12-13 06:46:44.337352909 +0000 UTC m=+1038.203947500" Dec 13 06:46:47 crc kubenswrapper[5048]: E1213 06:46:47.568835 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:101b3e007d8c9f2e183262d7712f986ad51256448099069bc14f1ea5f997ab94\\\"\"" pod="openstack-operators/test-operator-controller-manager-756ccf86c7-dv9ww" podUID="a046de13-f7f7-4d7c-abf3-79ed8cc60fad" Dec 13 06:46:48 crc kubenswrapper[5048]: I1213 06:46:48.333209 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/barbican-operator-controller-manager-95949466-xmb6d" Dec 13 06:46:48 crc kubenswrapper[5048]: I1213 06:46:48.350125 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-5cf45c46bd-mblxt" Dec 13 06:46:48 crc kubenswrapper[5048]: I1213 06:46:48.384501 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-66f8b87655-ll9d4" Dec 13 06:46:48 crc kubenswrapper[5048]: I1213 06:46:48.400022 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-767f9d7567-md4jk" Dec 13 06:46:48 crc kubenswrapper[5048]: I1213 06:46:48.436057 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-59b8dcb766-sn85z" Dec 13 06:46:48 crc kubenswrapper[5048]: I1213 06:46:48.527157 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-6ccf486b9-g6ggd" Dec 13 06:46:48 crc kubenswrapper[5048]: E1213 06:46:48.568502 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:961417d59f527d925ac48ff6a11de747d0493315e496e34dc83d76a1a1fff58a\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-55f78b7c4c-b4f6l" podUID="7165a29e-88bf-4194-bffa-414a675d1be5" Dec 13 06:46:48 crc kubenswrapper[5048]: I1213 06:46:48.576281 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-f458558d7-xxj48" Dec 13 06:46:48 crc kubenswrapper[5048]: I1213 06:46:48.755592 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-5fdd9786f7-mkjnl" Dec 13 06:46:48 crc kubenswrapper[5048]: I1213 06:46:48.826263 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-7cd87b778f-zgrtn" Dec 13 06:46:48 crc kubenswrapper[5048]: I1213 06:46:48.856701 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-f76f4954c-bpgwj" Dec 13 06:46:48 crc kubenswrapper[5048]: I1213 06:46:48.967552 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-fklqp" Dec 13 06:46:49 crc kubenswrapper[5048]: I1213 06:46:49.028821 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-68c649d9d-gqsj6" Dec 13 06:46:49 crc kubenswrapper[5048]: I1213 06:46:49.042235 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-4ck8m" Dec 13 06:46:49 crc kubenswrapper[5048]: I1213 06:46:49.156807 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-8665b56d78-g782d" Dec 13 06:46:49 crc kubenswrapper[5048]: I1213 06:46:49.178722 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-5c6df8f9-sx7sf" 
Dec 13 06:46:49 crc kubenswrapper[5048]: I1213 06:46:49.179632 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-5c6df8f9-sx7sf" Dec 13 06:46:49 crc kubenswrapper[5048]: E1213 06:46:49.568909 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:f27e732ec1faee765461bf137d9be81278b2fa39675019a73622755e1e610b6f\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-97d456b9-2bg97" podUID="89f4ffab-8c61-4389-8f41-43cd8e2d54de" Dec 13 06:46:50 crc kubenswrapper[5048]: I1213 06:46:50.515483 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-7cf9bd88b6-wswvl" Dec 13 06:46:50 crc kubenswrapper[5048]: I1213 06:46:50.877345 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878" Dec 13 06:46:51 crc kubenswrapper[5048]: I1213 06:46:51.289047 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-58d7cfb75d-nsmnm" Dec 13 06:46:54 crc kubenswrapper[5048]: I1213 06:46:54.390992 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-5c7cbf548f-fdf2l" event={"ID":"148d1633-90ad-45da-af0b-5b182ee41795","Type":"ContainerStarted","Data":"7d1934c093ae55f09d0bb7f1d93ddb410d8686e4647b5f2c6980571e6c167aa2"} Dec 13 06:46:54 crc kubenswrapper[5048]: I1213 06:46:54.391827 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-5c7cbf548f-fdf2l" Dec 13 06:46:54 crc kubenswrapper[5048]: I1213 06:46:54.411474 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-5c7cbf548f-fdf2l" podStartSLOduration=2.831856164 podStartE2EDuration="56.411452968s" podCreationTimestamp="2025-12-13 06:45:58 +0000 UTC" firstStartedPulling="2025-12-13 06:45:59.86639786 +0000 UTC m=+993.732992441" lastFinishedPulling="2025-12-13 06:46:53.445994654 +0000 UTC m=+1047.312589245" observedRunningTime="2025-12-13 06:46:54.406369437 +0000 UTC m=+1048.272964078" watchObservedRunningTime="2025-12-13 06:46:54.411452968 +0000 UTC m=+1048.278047559" Dec 13 06:46:58 crc kubenswrapper[5048]: I1213 06:46:58.659304 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-5c7cbf548f-fdf2l" Dec 13 06:47:03 crc kubenswrapper[5048]: I1213 06:47:03.458309 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-55f78b7c4c-b4f6l" event={"ID":"7165a29e-88bf-4194-bffa-414a675d1be5","Type":"ContainerStarted","Data":"8673bb621a7cf8def2679b657168ba2e7116ff65043064131c2a9590e811482d"} Dec 13 06:47:03 crc kubenswrapper[5048]: I1213 06:47:03.459064 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-55f78b7c4c-b4f6l" Dec 13 06:47:03 crc kubenswrapper[5048]: I1213 06:47:03.459503 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-97d456b9-2bg97" 
event={"ID":"89f4ffab-8c61-4389-8f41-43cd8e2d54de","Type":"ContainerStarted","Data":"fa4254f27369c53aba9cd6c8dcdc425a82020c40dd5a956e239d8cbe66ae38ce"} Dec 13 06:47:03 crc kubenswrapper[5048]: I1213 06:47:03.459738 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-97d456b9-2bg97" Dec 13 06:47:03 crc kubenswrapper[5048]: I1213 06:47:03.461373 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-756ccf86c7-dv9ww" event={"ID":"a046de13-f7f7-4d7c-abf3-79ed8cc60fad","Type":"ContainerStarted","Data":"dd54c853ba1875831619ea5004acd7fe518538dcfdc976ca0276fd895231bece"} Dec 13 06:47:03 crc kubenswrapper[5048]: I1213 06:47:03.461631 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-756ccf86c7-dv9ww" Dec 13 06:47:03 crc kubenswrapper[5048]: I1213 06:47:03.477975 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-55f78b7c4c-b4f6l" podStartSLOduration=3.271250045 podStartE2EDuration="1m5.477957633s" podCreationTimestamp="2025-12-13 06:45:58 +0000 UTC" firstStartedPulling="2025-12-13 06:46:00.24996976 +0000 UTC m=+994.116564341" lastFinishedPulling="2025-12-13 06:47:02.456677338 +0000 UTC m=+1056.323271929" observedRunningTime="2025-12-13 06:47:03.476272136 +0000 UTC m=+1057.342866717" watchObservedRunningTime="2025-12-13 06:47:03.477957633 +0000 UTC m=+1057.344552214" Dec 13 06:47:03 crc kubenswrapper[5048]: I1213 06:47:03.492318 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-756ccf86c7-dv9ww" podStartSLOduration=2.6363061979999998 podStartE2EDuration="1m5.492303252s" podCreationTimestamp="2025-12-13 06:45:58 +0000 UTC" firstStartedPulling="2025-12-13 06:46:00.262870699 +0000 UTC m=+994.129465280" lastFinishedPulling="2025-12-13 06:47:03.118867753 +0000 UTC m=+1056.985462334" observedRunningTime="2025-12-13 06:47:03.489749281 +0000 UTC m=+1057.356343892" watchObservedRunningTime="2025-12-13 06:47:03.492303252 +0000 UTC m=+1057.358897833" Dec 13 06:47:03 crc kubenswrapper[5048]: I1213 06:47:03.517199 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-97d456b9-2bg97" podStartSLOduration=2.5352894519999998 podStartE2EDuration="1m5.517173392s" podCreationTimestamp="2025-12-13 06:45:58 +0000 UTC" firstStartedPulling="2025-12-13 06:46:00.206141803 +0000 UTC m=+994.072736384" lastFinishedPulling="2025-12-13 06:47:03.188025743 +0000 UTC m=+1057.054620324" observedRunningTime="2025-12-13 06:47:03.511576536 +0000 UTC m=+1057.378171137" watchObservedRunningTime="2025-12-13 06:47:03.517173392 +0000 UTC m=+1057.383767983" Dec 13 06:47:09 crc kubenswrapper[5048]: I1213 06:47:09.214787 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-97d456b9-2bg97" Dec 13 06:47:09 crc kubenswrapper[5048]: I1213 06:47:09.294517 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-756ccf86c7-dv9ww" Dec 13 06:47:09 crc kubenswrapper[5048]: I1213 06:47:09.412228 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-55f78b7c4c-b4f6l" 
Dec 13 06:47:25 crc kubenswrapper[5048]: I1213 06:47:25.933936 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-4bfsg"] Dec 13 06:47:25 crc kubenswrapper[5048]: I1213 06:47:25.936133 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-4bfsg" Dec 13 06:47:25 crc kubenswrapper[5048]: I1213 06:47:25.939660 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-2txbb" Dec 13 06:47:25 crc kubenswrapper[5048]: I1213 06:47:25.939759 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Dec 13 06:47:25 crc kubenswrapper[5048]: I1213 06:47:25.939913 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Dec 13 06:47:25 crc kubenswrapper[5048]: I1213 06:47:25.939958 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Dec 13 06:47:25 crc kubenswrapper[5048]: I1213 06:47:25.940337 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-4bfsg"] Dec 13 06:47:25 crc kubenswrapper[5048]: I1213 06:47:25.981872 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cnjnl\" (UniqueName: \"kubernetes.io/projected/bcdd7fc3-c63b-4178-a6c7-906ec624fd78-kube-api-access-cnjnl\") pod \"dnsmasq-dns-675f4bcbfc-4bfsg\" (UID: \"bcdd7fc3-c63b-4178-a6c7-906ec624fd78\") " pod="openstack/dnsmasq-dns-675f4bcbfc-4bfsg" Dec 13 06:47:25 crc kubenswrapper[5048]: I1213 06:47:25.982214 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bcdd7fc3-c63b-4178-a6c7-906ec624fd78-config\") pod \"dnsmasq-dns-675f4bcbfc-4bfsg\" (UID: \"bcdd7fc3-c63b-4178-a6c7-906ec624fd78\") " pod="openstack/dnsmasq-dns-675f4bcbfc-4bfsg" Dec 13 06:47:26 crc kubenswrapper[5048]: I1213 06:47:26.002030 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-xdcwb"] Dec 13 06:47:26 crc kubenswrapper[5048]: I1213 06:47:26.004514 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-xdcwb" Dec 13 06:47:26 crc kubenswrapper[5048]: I1213 06:47:26.008282 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Dec 13 06:47:26 crc kubenswrapper[5048]: I1213 06:47:26.018647 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-xdcwb"] Dec 13 06:47:26 crc kubenswrapper[5048]: I1213 06:47:26.083727 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ad32555d-5b77-4554-92da-733514b660d0-config\") pod \"dnsmasq-dns-78dd6ddcc-xdcwb\" (UID: \"ad32555d-5b77-4554-92da-733514b660d0\") " pod="openstack/dnsmasq-dns-78dd6ddcc-xdcwb" Dec 13 06:47:26 crc kubenswrapper[5048]: I1213 06:47:26.083821 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cnjnl\" (UniqueName: \"kubernetes.io/projected/bcdd7fc3-c63b-4178-a6c7-906ec624fd78-kube-api-access-cnjnl\") pod \"dnsmasq-dns-675f4bcbfc-4bfsg\" (UID: \"bcdd7fc3-c63b-4178-a6c7-906ec624fd78\") " pod="openstack/dnsmasq-dns-675f4bcbfc-4bfsg" Dec 13 06:47:26 crc kubenswrapper[5048]: I1213 06:47:26.083852 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bcdd7fc3-c63b-4178-a6c7-906ec624fd78-config\") pod \"dnsmasq-dns-675f4bcbfc-4bfsg\" (UID: \"bcdd7fc3-c63b-4178-a6c7-906ec624fd78\") " pod="openstack/dnsmasq-dns-675f4bcbfc-4bfsg" Dec 13 06:47:26 crc kubenswrapper[5048]: I1213 06:47:26.083900 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ad32555d-5b77-4554-92da-733514b660d0-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-xdcwb\" (UID: \"ad32555d-5b77-4554-92da-733514b660d0\") " pod="openstack/dnsmasq-dns-78dd6ddcc-xdcwb" Dec 13 06:47:26 crc kubenswrapper[5048]: I1213 06:47:26.083923 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7b4w9\" (UniqueName: \"kubernetes.io/projected/ad32555d-5b77-4554-92da-733514b660d0-kube-api-access-7b4w9\") pod \"dnsmasq-dns-78dd6ddcc-xdcwb\" (UID: \"ad32555d-5b77-4554-92da-733514b660d0\") " pod="openstack/dnsmasq-dns-78dd6ddcc-xdcwb" Dec 13 06:47:26 crc kubenswrapper[5048]: I1213 06:47:26.085161 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bcdd7fc3-c63b-4178-a6c7-906ec624fd78-config\") pod \"dnsmasq-dns-675f4bcbfc-4bfsg\" (UID: \"bcdd7fc3-c63b-4178-a6c7-906ec624fd78\") " pod="openstack/dnsmasq-dns-675f4bcbfc-4bfsg" Dec 13 06:47:26 crc kubenswrapper[5048]: I1213 06:47:26.107474 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cnjnl\" (UniqueName: \"kubernetes.io/projected/bcdd7fc3-c63b-4178-a6c7-906ec624fd78-kube-api-access-cnjnl\") pod \"dnsmasq-dns-675f4bcbfc-4bfsg\" (UID: \"bcdd7fc3-c63b-4178-a6c7-906ec624fd78\") " pod="openstack/dnsmasq-dns-675f4bcbfc-4bfsg" Dec 13 06:47:26 crc kubenswrapper[5048]: I1213 06:47:26.185217 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ad32555d-5b77-4554-92da-733514b660d0-config\") pod \"dnsmasq-dns-78dd6ddcc-xdcwb\" (UID: \"ad32555d-5b77-4554-92da-733514b660d0\") " pod="openstack/dnsmasq-dns-78dd6ddcc-xdcwb" Dec 13 06:47:26 crc kubenswrapper[5048]: I1213 
06:47:26.185348 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ad32555d-5b77-4554-92da-733514b660d0-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-xdcwb\" (UID: \"ad32555d-5b77-4554-92da-733514b660d0\") " pod="openstack/dnsmasq-dns-78dd6ddcc-xdcwb" Dec 13 06:47:26 crc kubenswrapper[5048]: I1213 06:47:26.185388 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7b4w9\" (UniqueName: \"kubernetes.io/projected/ad32555d-5b77-4554-92da-733514b660d0-kube-api-access-7b4w9\") pod \"dnsmasq-dns-78dd6ddcc-xdcwb\" (UID: \"ad32555d-5b77-4554-92da-733514b660d0\") " pod="openstack/dnsmasq-dns-78dd6ddcc-xdcwb" Dec 13 06:47:26 crc kubenswrapper[5048]: I1213 06:47:26.186310 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ad32555d-5b77-4554-92da-733514b660d0-config\") pod \"dnsmasq-dns-78dd6ddcc-xdcwb\" (UID: \"ad32555d-5b77-4554-92da-733514b660d0\") " pod="openstack/dnsmasq-dns-78dd6ddcc-xdcwb" Dec 13 06:47:26 crc kubenswrapper[5048]: I1213 06:47:26.186383 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ad32555d-5b77-4554-92da-733514b660d0-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-xdcwb\" (UID: \"ad32555d-5b77-4554-92da-733514b660d0\") " pod="openstack/dnsmasq-dns-78dd6ddcc-xdcwb" Dec 13 06:47:26 crc kubenswrapper[5048]: I1213 06:47:26.205257 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7b4w9\" (UniqueName: \"kubernetes.io/projected/ad32555d-5b77-4554-92da-733514b660d0-kube-api-access-7b4w9\") pod \"dnsmasq-dns-78dd6ddcc-xdcwb\" (UID: \"ad32555d-5b77-4554-92da-733514b660d0\") " pod="openstack/dnsmasq-dns-78dd6ddcc-xdcwb" Dec 13 06:47:26 crc kubenswrapper[5048]: I1213 06:47:26.264919 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-4bfsg" Dec 13 06:47:26 crc kubenswrapper[5048]: I1213 06:47:26.323778 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-xdcwb" Dec 13 06:47:27 crc kubenswrapper[5048]: I1213 06:47:27.213878 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-4bfsg"] Dec 13 06:47:27 crc kubenswrapper[5048]: W1213 06:47:27.216907 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbcdd7fc3_c63b_4178_a6c7_906ec624fd78.slice/crio-99f930b200e6008c11d334b5a8a591168c31ea8602f154e5f9173d64a5880a7b WatchSource:0}: Error finding container 99f930b200e6008c11d334b5a8a591168c31ea8602f154e5f9173d64a5880a7b: Status 404 returned error can't find the container with id 99f930b200e6008c11d334b5a8a591168c31ea8602f154e5f9173d64a5880a7b Dec 13 06:47:27 crc kubenswrapper[5048]: I1213 06:47:27.219885 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-xdcwb"] Dec 13 06:47:27 crc kubenswrapper[5048]: I1213 06:47:27.621002 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-4bfsg" event={"ID":"bcdd7fc3-c63b-4178-a6c7-906ec624fd78","Type":"ContainerStarted","Data":"99f930b200e6008c11d334b5a8a591168c31ea8602f154e5f9173d64a5880a7b"} Dec 13 06:47:27 crc kubenswrapper[5048]: I1213 06:47:27.622326 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-xdcwb" event={"ID":"ad32555d-5b77-4554-92da-733514b660d0","Type":"ContainerStarted","Data":"74ec75393f16942dff40387838d8c326fc5e952abc768ed8348a2e9eb26677f8"} Dec 13 06:47:28 crc kubenswrapper[5048]: I1213 06:47:28.708196 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-4bfsg"] Dec 13 06:47:28 crc kubenswrapper[5048]: I1213 06:47:28.741987 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-67pvp"] Dec 13 06:47:28 crc kubenswrapper[5048]: I1213 06:47:28.743140 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-67pvp" Dec 13 06:47:28 crc kubenswrapper[5048]: I1213 06:47:28.748535 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-67pvp"] Dec 13 06:47:28 crc kubenswrapper[5048]: I1213 06:47:28.863054 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/856a2a68-3593-4af9-8285-2f80d731f123-dns-svc\") pod \"dnsmasq-dns-666b6646f7-67pvp\" (UID: \"856a2a68-3593-4af9-8285-2f80d731f123\") " pod="openstack/dnsmasq-dns-666b6646f7-67pvp" Dec 13 06:47:28 crc kubenswrapper[5048]: I1213 06:47:28.863123 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/856a2a68-3593-4af9-8285-2f80d731f123-config\") pod \"dnsmasq-dns-666b6646f7-67pvp\" (UID: \"856a2a68-3593-4af9-8285-2f80d731f123\") " pod="openstack/dnsmasq-dns-666b6646f7-67pvp" Dec 13 06:47:28 crc kubenswrapper[5048]: I1213 06:47:28.863161 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gpj7x\" (UniqueName: \"kubernetes.io/projected/856a2a68-3593-4af9-8285-2f80d731f123-kube-api-access-gpj7x\") pod \"dnsmasq-dns-666b6646f7-67pvp\" (UID: \"856a2a68-3593-4af9-8285-2f80d731f123\") " pod="openstack/dnsmasq-dns-666b6646f7-67pvp" Dec 13 06:47:28 crc kubenswrapper[5048]: I1213 06:47:28.964176 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/856a2a68-3593-4af9-8285-2f80d731f123-config\") pod \"dnsmasq-dns-666b6646f7-67pvp\" (UID: \"856a2a68-3593-4af9-8285-2f80d731f123\") " pod="openstack/dnsmasq-dns-666b6646f7-67pvp" Dec 13 06:47:28 crc kubenswrapper[5048]: I1213 06:47:28.964258 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gpj7x\" (UniqueName: \"kubernetes.io/projected/856a2a68-3593-4af9-8285-2f80d731f123-kube-api-access-gpj7x\") pod \"dnsmasq-dns-666b6646f7-67pvp\" (UID: \"856a2a68-3593-4af9-8285-2f80d731f123\") " pod="openstack/dnsmasq-dns-666b6646f7-67pvp" Dec 13 06:47:28 crc kubenswrapper[5048]: I1213 06:47:28.964337 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/856a2a68-3593-4af9-8285-2f80d731f123-dns-svc\") pod \"dnsmasq-dns-666b6646f7-67pvp\" (UID: \"856a2a68-3593-4af9-8285-2f80d731f123\") " pod="openstack/dnsmasq-dns-666b6646f7-67pvp" Dec 13 06:47:28 crc kubenswrapper[5048]: I1213 06:47:28.965254 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/856a2a68-3593-4af9-8285-2f80d731f123-dns-svc\") pod \"dnsmasq-dns-666b6646f7-67pvp\" (UID: \"856a2a68-3593-4af9-8285-2f80d731f123\") " pod="openstack/dnsmasq-dns-666b6646f7-67pvp" Dec 13 06:47:28 crc kubenswrapper[5048]: I1213 06:47:28.966689 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/856a2a68-3593-4af9-8285-2f80d731f123-config\") pod \"dnsmasq-dns-666b6646f7-67pvp\" (UID: \"856a2a68-3593-4af9-8285-2f80d731f123\") " pod="openstack/dnsmasq-dns-666b6646f7-67pvp" Dec 13 06:47:29 crc kubenswrapper[5048]: I1213 06:47:29.000338 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gpj7x\" (UniqueName: 
\"kubernetes.io/projected/856a2a68-3593-4af9-8285-2f80d731f123-kube-api-access-gpj7x\") pod \"dnsmasq-dns-666b6646f7-67pvp\" (UID: \"856a2a68-3593-4af9-8285-2f80d731f123\") " pod="openstack/dnsmasq-dns-666b6646f7-67pvp" Dec 13 06:47:29 crc kubenswrapper[5048]: I1213 06:47:29.050347 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-xdcwb"] Dec 13 06:47:29 crc kubenswrapper[5048]: I1213 06:47:29.066229 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-67pvp" Dec 13 06:47:29 crc kubenswrapper[5048]: I1213 06:47:29.079394 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-fvvwg"] Dec 13 06:47:29 crc kubenswrapper[5048]: I1213 06:47:29.081024 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-fvvwg" Dec 13 06:47:29 crc kubenswrapper[5048]: I1213 06:47:29.099080 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-fvvwg"] Dec 13 06:47:29 crc kubenswrapper[5048]: I1213 06:47:29.167224 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2sl8x\" (UniqueName: \"kubernetes.io/projected/e70bef65-3e49-4e8c-8311-1c490a03fbe0-kube-api-access-2sl8x\") pod \"dnsmasq-dns-57d769cc4f-fvvwg\" (UID: \"e70bef65-3e49-4e8c-8311-1c490a03fbe0\") " pod="openstack/dnsmasq-dns-57d769cc4f-fvvwg" Dec 13 06:47:29 crc kubenswrapper[5048]: I1213 06:47:29.167308 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e70bef65-3e49-4e8c-8311-1c490a03fbe0-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-fvvwg\" (UID: \"e70bef65-3e49-4e8c-8311-1c490a03fbe0\") " pod="openstack/dnsmasq-dns-57d769cc4f-fvvwg" Dec 13 06:47:29 crc kubenswrapper[5048]: I1213 06:47:29.167344 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e70bef65-3e49-4e8c-8311-1c490a03fbe0-config\") pod \"dnsmasq-dns-57d769cc4f-fvvwg\" (UID: \"e70bef65-3e49-4e8c-8311-1c490a03fbe0\") " pod="openstack/dnsmasq-dns-57d769cc4f-fvvwg" Dec 13 06:47:29 crc kubenswrapper[5048]: I1213 06:47:29.268512 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e70bef65-3e49-4e8c-8311-1c490a03fbe0-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-fvvwg\" (UID: \"e70bef65-3e49-4e8c-8311-1c490a03fbe0\") " pod="openstack/dnsmasq-dns-57d769cc4f-fvvwg" Dec 13 06:47:29 crc kubenswrapper[5048]: I1213 06:47:29.268880 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e70bef65-3e49-4e8c-8311-1c490a03fbe0-config\") pod \"dnsmasq-dns-57d769cc4f-fvvwg\" (UID: \"e70bef65-3e49-4e8c-8311-1c490a03fbe0\") " pod="openstack/dnsmasq-dns-57d769cc4f-fvvwg" Dec 13 06:47:29 crc kubenswrapper[5048]: I1213 06:47:29.268943 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2sl8x\" (UniqueName: \"kubernetes.io/projected/e70bef65-3e49-4e8c-8311-1c490a03fbe0-kube-api-access-2sl8x\") pod \"dnsmasq-dns-57d769cc4f-fvvwg\" (UID: \"e70bef65-3e49-4e8c-8311-1c490a03fbe0\") " pod="openstack/dnsmasq-dns-57d769cc4f-fvvwg" Dec 13 06:47:29 crc kubenswrapper[5048]: I1213 06:47:29.269548 5048 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e70bef65-3e49-4e8c-8311-1c490a03fbe0-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-fvvwg\" (UID: \"e70bef65-3e49-4e8c-8311-1c490a03fbe0\") " pod="openstack/dnsmasq-dns-57d769cc4f-fvvwg" Dec 13 06:47:29 crc kubenswrapper[5048]: I1213 06:47:29.269858 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e70bef65-3e49-4e8c-8311-1c490a03fbe0-config\") pod \"dnsmasq-dns-57d769cc4f-fvvwg\" (UID: \"e70bef65-3e49-4e8c-8311-1c490a03fbe0\") " pod="openstack/dnsmasq-dns-57d769cc4f-fvvwg" Dec 13 06:47:29 crc kubenswrapper[5048]: I1213 06:47:29.290093 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2sl8x\" (UniqueName: \"kubernetes.io/projected/e70bef65-3e49-4e8c-8311-1c490a03fbe0-kube-api-access-2sl8x\") pod \"dnsmasq-dns-57d769cc4f-fvvwg\" (UID: \"e70bef65-3e49-4e8c-8311-1c490a03fbe0\") " pod="openstack/dnsmasq-dns-57d769cc4f-fvvwg" Dec 13 06:47:29 crc kubenswrapper[5048]: I1213 06:47:29.479148 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-fvvwg" Dec 13 06:47:29 crc kubenswrapper[5048]: I1213 06:47:29.587562 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-67pvp"] Dec 13 06:47:29 crc kubenswrapper[5048]: W1213 06:47:29.596950 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod856a2a68_3593_4af9_8285_2f80d731f123.slice/crio-eb4ef89afc7a1dd2965f3e4b4b35dabc9bd2b7d33919bf62a13dafb110546634 WatchSource:0}: Error finding container eb4ef89afc7a1dd2965f3e4b4b35dabc9bd2b7d33919bf62a13dafb110546634: Status 404 returned error can't find the container with id eb4ef89afc7a1dd2965f3e4b4b35dabc9bd2b7d33919bf62a13dafb110546634 Dec 13 06:47:29 crc kubenswrapper[5048]: I1213 06:47:29.653802 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-67pvp" event={"ID":"856a2a68-3593-4af9-8285-2f80d731f123","Type":"ContainerStarted","Data":"eb4ef89afc7a1dd2965f3e4b4b35dabc9bd2b7d33919bf62a13dafb110546634"} Dec 13 06:47:29 crc kubenswrapper[5048]: I1213 06:47:29.911107 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Dec 13 06:47:29 crc kubenswrapper[5048]: I1213 06:47:29.912580 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 13 06:47:29 crc kubenswrapper[5048]: I1213 06:47:29.914626 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-qk64f" Dec 13 06:47:29 crc kubenswrapper[5048]: I1213 06:47:29.915875 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Dec 13 06:47:29 crc kubenswrapper[5048]: I1213 06:47:29.915918 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Dec 13 06:47:29 crc kubenswrapper[5048]: I1213 06:47:29.916159 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Dec 13 06:47:29 crc kubenswrapper[5048]: I1213 06:47:29.916232 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Dec 13 06:47:29 crc kubenswrapper[5048]: I1213 06:47:29.916425 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Dec 13 06:47:29 crc kubenswrapper[5048]: I1213 06:47:29.917113 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Dec 13 06:47:29 crc kubenswrapper[5048]: I1213 06:47:29.927976 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 13 06:47:29 crc kubenswrapper[5048]: I1213 06:47:29.997612 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-fvvwg"] Dec 13 06:47:30 crc kubenswrapper[5048]: W1213 06:47:30.011944 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode70bef65_3e49_4e8c_8311_1c490a03fbe0.slice/crio-384d0b0ff7c45c873bc67106848a894adbef57e91ced3e668311cea43ce6639a WatchSource:0}: Error finding container 384d0b0ff7c45c873bc67106848a894adbef57e91ced3e668311cea43ce6639a: Status 404 returned error can't find the container with id 384d0b0ff7c45c873bc67106848a894adbef57e91ced3e668311cea43ce6639a Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.084218 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/a7bbc535-10f7-44cc-89a6-cbb697149e4a-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " pod="openstack/rabbitmq-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.084272 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/a7bbc535-10f7-44cc-89a6-cbb697149e4a-server-conf\") pod \"rabbitmq-server-0\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " pod="openstack/rabbitmq-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.084316 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/a7bbc535-10f7-44cc-89a6-cbb697149e4a-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " pod="openstack/rabbitmq-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.084388 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/a7bbc535-10f7-44cc-89a6-cbb697149e4a-erlang-cookie-secret\") pod \"rabbitmq-server-0\" 
(UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " pod="openstack/rabbitmq-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.084455 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fbhlw\" (UniqueName: \"kubernetes.io/projected/a7bbc535-10f7-44cc-89a6-cbb697149e4a-kube-api-access-fbhlw\") pod \"rabbitmq-server-0\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " pod="openstack/rabbitmq-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.084484 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/a7bbc535-10f7-44cc-89a6-cbb697149e4a-pod-info\") pod \"rabbitmq-server-0\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " pod="openstack/rabbitmq-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.084513 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a7bbc535-10f7-44cc-89a6-cbb697149e4a-config-data\") pod \"rabbitmq-server-0\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " pod="openstack/rabbitmq-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.084545 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/a7bbc535-10f7-44cc-89a6-cbb697149e4a-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " pod="openstack/rabbitmq-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.084583 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/a7bbc535-10f7-44cc-89a6-cbb697149e4a-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " pod="openstack/rabbitmq-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.084644 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/a7bbc535-10f7-44cc-89a6-cbb697149e4a-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " pod="openstack/rabbitmq-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.084682 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " pod="openstack/rabbitmq-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.185563 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/a7bbc535-10f7-44cc-89a6-cbb697149e4a-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " pod="openstack/rabbitmq-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.185648 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/a7bbc535-10f7-44cc-89a6-cbb697149e4a-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " pod="openstack/rabbitmq-server-0" Dec 13 06:47:30 crc 
kubenswrapper[5048]: I1213 06:47:30.185684 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " pod="openstack/rabbitmq-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.185715 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/a7bbc535-10f7-44cc-89a6-cbb697149e4a-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " pod="openstack/rabbitmq-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.185731 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/a7bbc535-10f7-44cc-89a6-cbb697149e4a-server-conf\") pod \"rabbitmq-server-0\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " pod="openstack/rabbitmq-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.185755 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/a7bbc535-10f7-44cc-89a6-cbb697149e4a-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " pod="openstack/rabbitmq-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.185780 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/a7bbc535-10f7-44cc-89a6-cbb697149e4a-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " pod="openstack/rabbitmq-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.185799 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fbhlw\" (UniqueName: \"kubernetes.io/projected/a7bbc535-10f7-44cc-89a6-cbb697149e4a-kube-api-access-fbhlw\") pod \"rabbitmq-server-0\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " pod="openstack/rabbitmq-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.185820 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/a7bbc535-10f7-44cc-89a6-cbb697149e4a-pod-info\") pod \"rabbitmq-server-0\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " pod="openstack/rabbitmq-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.185836 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a7bbc535-10f7-44cc-89a6-cbb697149e4a-config-data\") pod \"rabbitmq-server-0\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " pod="openstack/rabbitmq-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.185859 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/a7bbc535-10f7-44cc-89a6-cbb697149e4a-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " pod="openstack/rabbitmq-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.186178 5048 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: 
\"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/rabbitmq-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.186356 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/a7bbc535-10f7-44cc-89a6-cbb697149e4a-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " pod="openstack/rabbitmq-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.187012 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/a7bbc535-10f7-44cc-89a6-cbb697149e4a-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " pod="openstack/rabbitmq-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.187555 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/a7bbc535-10f7-44cc-89a6-cbb697149e4a-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " pod="openstack/rabbitmq-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.191368 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a7bbc535-10f7-44cc-89a6-cbb697149e4a-config-data\") pod \"rabbitmq-server-0\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " pod="openstack/rabbitmq-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.192387 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/a7bbc535-10f7-44cc-89a6-cbb697149e4a-server-conf\") pod \"rabbitmq-server-0\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " pod="openstack/rabbitmq-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.193258 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/a7bbc535-10f7-44cc-89a6-cbb697149e4a-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " pod="openstack/rabbitmq-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.194026 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/a7bbc535-10f7-44cc-89a6-cbb697149e4a-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " pod="openstack/rabbitmq-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.194308 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/a7bbc535-10f7-44cc-89a6-cbb697149e4a-pod-info\") pod \"rabbitmq-server-0\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " pod="openstack/rabbitmq-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.194791 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/a7bbc535-10f7-44cc-89a6-cbb697149e4a-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " pod="openstack/rabbitmq-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.208704 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fbhlw\" (UniqueName: 
\"kubernetes.io/projected/a7bbc535-10f7-44cc-89a6-cbb697149e4a-kube-api-access-fbhlw\") pod \"rabbitmq-server-0\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " pod="openstack/rabbitmq-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.214110 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.226074 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " pod="openstack/rabbitmq-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.230518 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.230633 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.233202 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.233233 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-gnwjj" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.233394 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.233545 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.233673 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.233867 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.234233 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.388787 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e3a0bce1-8848-4ac7-a030-19640b952708-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.389063 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e3a0bce1-8848-4ac7-a030-19640b952708-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.389086 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e3a0bce1-8848-4ac7-a030-19640b952708-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.389112 5048 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e3a0bce1-8848-4ac7-a030-19640b952708-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.389139 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.389155 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e3a0bce1-8848-4ac7-a030-19640b952708-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.389196 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e3a0bce1-8848-4ac7-a030-19640b952708-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.389227 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zdlfb\" (UniqueName: \"kubernetes.io/projected/e3a0bce1-8848-4ac7-a030-19640b952708-kube-api-access-zdlfb\") pod \"rabbitmq-cell1-server-0\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.389251 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e3a0bce1-8848-4ac7-a030-19640b952708-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.389266 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e3a0bce1-8848-4ac7-a030-19640b952708-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.389287 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e3a0bce1-8848-4ac7-a030-19640b952708-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.494142 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e3a0bce1-8848-4ac7-a030-19640b952708-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.494202 5048 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-zdlfb\" (UniqueName: \"kubernetes.io/projected/e3a0bce1-8848-4ac7-a030-19640b952708-kube-api-access-zdlfb\") pod \"rabbitmq-cell1-server-0\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.494294 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e3a0bce1-8848-4ac7-a030-19640b952708-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.494396 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e3a0bce1-8848-4ac7-a030-19640b952708-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.494415 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e3a0bce1-8848-4ac7-a030-19640b952708-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.495561 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e3a0bce1-8848-4ac7-a030-19640b952708-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.495942 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e3a0bce1-8848-4ac7-a030-19640b952708-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.496372 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e3a0bce1-8848-4ac7-a030-19640b952708-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.496960 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e3a0bce1-8848-4ac7-a030-19640b952708-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.496995 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e3a0bce1-8848-4ac7-a030-19640b952708-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.498332 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: 
\"kubernetes.io/empty-dir/e3a0bce1-8848-4ac7-a030-19640b952708-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.503262 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e3a0bce1-8848-4ac7-a030-19640b952708-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.503668 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e3a0bce1-8848-4ac7-a030-19640b952708-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.503779 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e3a0bce1-8848-4ac7-a030-19640b952708-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.503853 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.503880 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e3a0bce1-8848-4ac7-a030-19640b952708-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.504368 5048 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.504547 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e3a0bce1-8848-4ac7-a030-19640b952708-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.508647 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e3a0bce1-8848-4ac7-a030-19640b952708-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.509135 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e3a0bce1-8848-4ac7-a030-19640b952708-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " 
pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.519527 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e3a0bce1-8848-4ac7-a030-19640b952708-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.523426 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zdlfb\" (UniqueName: \"kubernetes.io/projected/e3a0bce1-8848-4ac7-a030-19640b952708-kube-api-access-zdlfb\") pod \"rabbitmq-cell1-server-0\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.543124 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.551644 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.601155 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:47:30 crc kubenswrapper[5048]: I1213 06:47:30.665577 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-fvvwg" event={"ID":"e70bef65-3e49-4e8c-8311-1c490a03fbe0","Type":"ContainerStarted","Data":"384d0b0ff7c45c873bc67106848a894adbef57e91ced3e668311cea43ce6639a"} Dec 13 06:47:31 crc kubenswrapper[5048]: I1213 06:47:31.154033 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 13 06:47:31 crc kubenswrapper[5048]: I1213 06:47:31.161528 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 13 06:47:31 crc kubenswrapper[5048]: I1213 06:47:31.576617 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Dec 13 06:47:31 crc kubenswrapper[5048]: I1213 06:47:31.580584 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Dec 13 06:47:31 crc kubenswrapper[5048]: I1213 06:47:31.583160 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Dec 13 06:47:31 crc kubenswrapper[5048]: I1213 06:47:31.583736 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Dec 13 06:47:31 crc kubenswrapper[5048]: I1213 06:47:31.584268 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Dec 13 06:47:31 crc kubenswrapper[5048]: I1213 06:47:31.584480 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-q7tkg" Dec 13 06:47:31 crc kubenswrapper[5048]: I1213 06:47:31.596704 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Dec 13 06:47:31 crc kubenswrapper[5048]: I1213 06:47:31.599637 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Dec 13 06:47:31 crc kubenswrapper[5048]: I1213 06:47:31.749963 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/a5870a17-c845-46b8-a25c-8f8822a93cb8-config-data-default\") pod \"openstack-galera-0\" (UID: \"a5870a17-c845-46b8-a25c-8f8822a93cb8\") " pod="openstack/openstack-galera-0" Dec 13 06:47:31 crc kubenswrapper[5048]: I1213 06:47:31.751184 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m2pmz\" (UniqueName: \"kubernetes.io/projected/a5870a17-c845-46b8-a25c-8f8822a93cb8-kube-api-access-m2pmz\") pod \"openstack-galera-0\" (UID: \"a5870a17-c845-46b8-a25c-8f8822a93cb8\") " pod="openstack/openstack-galera-0" Dec 13 06:47:31 crc kubenswrapper[5048]: I1213 06:47:31.751261 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5870a17-c845-46b8-a25c-8f8822a93cb8-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"a5870a17-c845-46b8-a25c-8f8822a93cb8\") " pod="openstack/openstack-galera-0" Dec 13 06:47:31 crc kubenswrapper[5048]: I1213 06:47:31.751293 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a5870a17-c845-46b8-a25c-8f8822a93cb8-operator-scripts\") pod \"openstack-galera-0\" (UID: \"a5870a17-c845-46b8-a25c-8f8822a93cb8\") " pod="openstack/openstack-galera-0" Dec 13 06:47:31 crc kubenswrapper[5048]: I1213 06:47:31.751374 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5870a17-c845-46b8-a25c-8f8822a93cb8-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"a5870a17-c845-46b8-a25c-8f8822a93cb8\") " pod="openstack/openstack-galera-0" Dec 13 06:47:31 crc kubenswrapper[5048]: I1213 06:47:31.751465 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/a5870a17-c845-46b8-a25c-8f8822a93cb8-config-data-generated\") pod \"openstack-galera-0\" (UID: \"a5870a17-c845-46b8-a25c-8f8822a93cb8\") " pod="openstack/openstack-galera-0" Dec 13 06:47:31 crc kubenswrapper[5048]: I1213 06:47:31.751554 5048 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-0\" (UID: \"a5870a17-c845-46b8-a25c-8f8822a93cb8\") " pod="openstack/openstack-galera-0" Dec 13 06:47:31 crc kubenswrapper[5048]: I1213 06:47:31.751640 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a5870a17-c845-46b8-a25c-8f8822a93cb8-kolla-config\") pod \"openstack-galera-0\" (UID: \"a5870a17-c845-46b8-a25c-8f8822a93cb8\") " pod="openstack/openstack-galera-0" Dec 13 06:47:31 crc kubenswrapper[5048]: I1213 06:47:31.863424 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/a5870a17-c845-46b8-a25c-8f8822a93cb8-config-data-generated\") pod \"openstack-galera-0\" (UID: \"a5870a17-c845-46b8-a25c-8f8822a93cb8\") " pod="openstack/openstack-galera-0" Dec 13 06:47:31 crc kubenswrapper[5048]: I1213 06:47:31.863504 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-0\" (UID: \"a5870a17-c845-46b8-a25c-8f8822a93cb8\") " pod="openstack/openstack-galera-0" Dec 13 06:47:31 crc kubenswrapper[5048]: I1213 06:47:31.863555 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a5870a17-c845-46b8-a25c-8f8822a93cb8-kolla-config\") pod \"openstack-galera-0\" (UID: \"a5870a17-c845-46b8-a25c-8f8822a93cb8\") " pod="openstack/openstack-galera-0" Dec 13 06:47:31 crc kubenswrapper[5048]: I1213 06:47:31.863598 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/a5870a17-c845-46b8-a25c-8f8822a93cb8-config-data-default\") pod \"openstack-galera-0\" (UID: \"a5870a17-c845-46b8-a25c-8f8822a93cb8\") " pod="openstack/openstack-galera-0" Dec 13 06:47:31 crc kubenswrapper[5048]: I1213 06:47:31.863633 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m2pmz\" (UniqueName: \"kubernetes.io/projected/a5870a17-c845-46b8-a25c-8f8822a93cb8-kube-api-access-m2pmz\") pod \"openstack-galera-0\" (UID: \"a5870a17-c845-46b8-a25c-8f8822a93cb8\") " pod="openstack/openstack-galera-0" Dec 13 06:47:31 crc kubenswrapper[5048]: I1213 06:47:31.863655 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5870a17-c845-46b8-a25c-8f8822a93cb8-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"a5870a17-c845-46b8-a25c-8f8822a93cb8\") " pod="openstack/openstack-galera-0" Dec 13 06:47:31 crc kubenswrapper[5048]: I1213 06:47:31.863672 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a5870a17-c845-46b8-a25c-8f8822a93cb8-operator-scripts\") pod \"openstack-galera-0\" (UID: \"a5870a17-c845-46b8-a25c-8f8822a93cb8\") " pod="openstack/openstack-galera-0" Dec 13 06:47:31 crc kubenswrapper[5048]: I1213 06:47:31.863700 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5870a17-c845-46b8-a25c-8f8822a93cb8-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: 
\"a5870a17-c845-46b8-a25c-8f8822a93cb8\") " pod="openstack/openstack-galera-0" Dec 13 06:47:31 crc kubenswrapper[5048]: I1213 06:47:31.865417 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/a5870a17-c845-46b8-a25c-8f8822a93cb8-config-data-default\") pod \"openstack-galera-0\" (UID: \"a5870a17-c845-46b8-a25c-8f8822a93cb8\") " pod="openstack/openstack-galera-0" Dec 13 06:47:31 crc kubenswrapper[5048]: I1213 06:47:31.865667 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/a5870a17-c845-46b8-a25c-8f8822a93cb8-config-data-generated\") pod \"openstack-galera-0\" (UID: \"a5870a17-c845-46b8-a25c-8f8822a93cb8\") " pod="openstack/openstack-galera-0" Dec 13 06:47:31 crc kubenswrapper[5048]: I1213 06:47:31.865910 5048 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-0\" (UID: \"a5870a17-c845-46b8-a25c-8f8822a93cb8\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/openstack-galera-0" Dec 13 06:47:31 crc kubenswrapper[5048]: I1213 06:47:31.869724 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5870a17-c845-46b8-a25c-8f8822a93cb8-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"a5870a17-c845-46b8-a25c-8f8822a93cb8\") " pod="openstack/openstack-galera-0" Dec 13 06:47:31 crc kubenswrapper[5048]: I1213 06:47:31.870567 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a5870a17-c845-46b8-a25c-8f8822a93cb8-kolla-config\") pod \"openstack-galera-0\" (UID: \"a5870a17-c845-46b8-a25c-8f8822a93cb8\") " pod="openstack/openstack-galera-0" Dec 13 06:47:31 crc kubenswrapper[5048]: I1213 06:47:31.873775 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a5870a17-c845-46b8-a25c-8f8822a93cb8-operator-scripts\") pod \"openstack-galera-0\" (UID: \"a5870a17-c845-46b8-a25c-8f8822a93cb8\") " pod="openstack/openstack-galera-0" Dec 13 06:47:31 crc kubenswrapper[5048]: I1213 06:47:31.894993 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m2pmz\" (UniqueName: \"kubernetes.io/projected/a5870a17-c845-46b8-a25c-8f8822a93cb8-kube-api-access-m2pmz\") pod \"openstack-galera-0\" (UID: \"a5870a17-c845-46b8-a25c-8f8822a93cb8\") " pod="openstack/openstack-galera-0" Dec 13 06:47:31 crc kubenswrapper[5048]: I1213 06:47:31.901337 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5870a17-c845-46b8-a25c-8f8822a93cb8-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"a5870a17-c845-46b8-a25c-8f8822a93cb8\") " pod="openstack/openstack-galera-0" Dec 13 06:47:31 crc kubenswrapper[5048]: I1213 06:47:31.965256 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-0\" (UID: \"a5870a17-c845-46b8-a25c-8f8822a93cb8\") " pod="openstack/openstack-galera-0" Dec 13 06:47:32 crc kubenswrapper[5048]: I1213 06:47:32.213743 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Dec 13 06:47:32 crc kubenswrapper[5048]: I1213 06:47:32.897558 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Dec 13 06:47:32 crc kubenswrapper[5048]: I1213 06:47:32.899101 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Dec 13 06:47:32 crc kubenswrapper[5048]: I1213 06:47:32.901263 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Dec 13 06:47:32 crc kubenswrapper[5048]: I1213 06:47:32.901417 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-gzhjz" Dec 13 06:47:32 crc kubenswrapper[5048]: I1213 06:47:32.901857 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Dec 13 06:47:32 crc kubenswrapper[5048]: I1213 06:47:32.901882 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Dec 13 06:47:32 crc kubenswrapper[5048]: I1213 06:47:32.933047 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.085162 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/be46506c-41d0-4b9f-92bd-f34eb6d6a1aa-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"be46506c-41d0-4b9f-92bd-f34eb6d6a1aa\") " pod="openstack/openstack-cell1-galera-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.085301 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/be46506c-41d0-4b9f-92bd-f34eb6d6a1aa-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"be46506c-41d0-4b9f-92bd-f34eb6d6a1aa\") " pod="openstack/openstack-cell1-galera-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.085404 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/be46506c-41d0-4b9f-92bd-f34eb6d6a1aa-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"be46506c-41d0-4b9f-92bd-f34eb6d6a1aa\") " pod="openstack/openstack-cell1-galera-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.085579 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/be46506c-41d0-4b9f-92bd-f34eb6d6a1aa-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"be46506c-41d0-4b9f-92bd-f34eb6d6a1aa\") " pod="openstack/openstack-cell1-galera-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.085653 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/be46506c-41d0-4b9f-92bd-f34eb6d6a1aa-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"be46506c-41d0-4b9f-92bd-f34eb6d6a1aa\") " pod="openstack/openstack-cell1-galera-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.085756 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nsmbh\" (UniqueName: 
\"kubernetes.io/projected/be46506c-41d0-4b9f-92bd-f34eb6d6a1aa-kube-api-access-nsmbh\") pod \"openstack-cell1-galera-0\" (UID: \"be46506c-41d0-4b9f-92bd-f34eb6d6a1aa\") " pod="openstack/openstack-cell1-galera-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.085821 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"be46506c-41d0-4b9f-92bd-f34eb6d6a1aa\") " pod="openstack/openstack-cell1-galera-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.085858 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be46506c-41d0-4b9f-92bd-f34eb6d6a1aa-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"be46506c-41d0-4b9f-92bd-f34eb6d6a1aa\") " pod="openstack/openstack-cell1-galera-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.089839 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.093384 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.097858 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.098110 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.098262 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-vptkb" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.111368 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.187571 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/be46506c-41d0-4b9f-92bd-f34eb6d6a1aa-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"be46506c-41d0-4b9f-92bd-f34eb6d6a1aa\") " pod="openstack/openstack-cell1-galera-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.187687 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/be46506c-41d0-4b9f-92bd-f34eb6d6a1aa-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"be46506c-41d0-4b9f-92bd-f34eb6d6a1aa\") " pod="openstack/openstack-cell1-galera-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.187739 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/be46506c-41d0-4b9f-92bd-f34eb6d6a1aa-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"be46506c-41d0-4b9f-92bd-f34eb6d6a1aa\") " pod="openstack/openstack-cell1-galera-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.187790 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/be46506c-41d0-4b9f-92bd-f34eb6d6a1aa-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"be46506c-41d0-4b9f-92bd-f34eb6d6a1aa\") " pod="openstack/openstack-cell1-galera-0" Dec 13 06:47:33 crc kubenswrapper[5048]: 
I1213 06:47:33.187818 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/be46506c-41d0-4b9f-92bd-f34eb6d6a1aa-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"be46506c-41d0-4b9f-92bd-f34eb6d6a1aa\") " pod="openstack/openstack-cell1-galera-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.187839 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nsmbh\" (UniqueName: \"kubernetes.io/projected/be46506c-41d0-4b9f-92bd-f34eb6d6a1aa-kube-api-access-nsmbh\") pod \"openstack-cell1-galera-0\" (UID: \"be46506c-41d0-4b9f-92bd-f34eb6d6a1aa\") " pod="openstack/openstack-cell1-galera-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.187864 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"be46506c-41d0-4b9f-92bd-f34eb6d6a1aa\") " pod="openstack/openstack-cell1-galera-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.187891 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be46506c-41d0-4b9f-92bd-f34eb6d6a1aa-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"be46506c-41d0-4b9f-92bd-f34eb6d6a1aa\") " pod="openstack/openstack-cell1-galera-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.189227 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/be46506c-41d0-4b9f-92bd-f34eb6d6a1aa-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"be46506c-41d0-4b9f-92bd-f34eb6d6a1aa\") " pod="openstack/openstack-cell1-galera-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.189454 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/be46506c-41d0-4b9f-92bd-f34eb6d6a1aa-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"be46506c-41d0-4b9f-92bd-f34eb6d6a1aa\") " pod="openstack/openstack-cell1-galera-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.189818 5048 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"be46506c-41d0-4b9f-92bd-f34eb6d6a1aa\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/openstack-cell1-galera-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.190153 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/be46506c-41d0-4b9f-92bd-f34eb6d6a1aa-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"be46506c-41d0-4b9f-92bd-f34eb6d6a1aa\") " pod="openstack/openstack-cell1-galera-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.192856 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/be46506c-41d0-4b9f-92bd-f34eb6d6a1aa-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"be46506c-41d0-4b9f-92bd-f34eb6d6a1aa\") " pod="openstack/openstack-cell1-galera-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.196034 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/be46506c-41d0-4b9f-92bd-f34eb6d6a1aa-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"be46506c-41d0-4b9f-92bd-f34eb6d6a1aa\") " pod="openstack/openstack-cell1-galera-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.210813 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/be46506c-41d0-4b9f-92bd-f34eb6d6a1aa-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"be46506c-41d0-4b9f-92bd-f34eb6d6a1aa\") " pod="openstack/openstack-cell1-galera-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.211577 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nsmbh\" (UniqueName: \"kubernetes.io/projected/be46506c-41d0-4b9f-92bd-f34eb6d6a1aa-kube-api-access-nsmbh\") pod \"openstack-cell1-galera-0\" (UID: \"be46506c-41d0-4b9f-92bd-f34eb6d6a1aa\") " pod="openstack/openstack-cell1-galera-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.214279 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"be46506c-41d0-4b9f-92bd-f34eb6d6a1aa\") " pod="openstack/openstack-cell1-galera-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.238008 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.289448 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c468ccc6-8384-4a69-ae41-cca18f9233e3-config-data\") pod \"memcached-0\" (UID: \"c468ccc6-8384-4a69-ae41-cca18f9233e3\") " pod="openstack/memcached-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.289512 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hqd6k\" (UniqueName: \"kubernetes.io/projected/c468ccc6-8384-4a69-ae41-cca18f9233e3-kube-api-access-hqd6k\") pod \"memcached-0\" (UID: \"c468ccc6-8384-4a69-ae41-cca18f9233e3\") " pod="openstack/memcached-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.289614 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c468ccc6-8384-4a69-ae41-cca18f9233e3-combined-ca-bundle\") pod \"memcached-0\" (UID: \"c468ccc6-8384-4a69-ae41-cca18f9233e3\") " pod="openstack/memcached-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.289648 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c468ccc6-8384-4a69-ae41-cca18f9233e3-kolla-config\") pod \"memcached-0\" (UID: \"c468ccc6-8384-4a69-ae41-cca18f9233e3\") " pod="openstack/memcached-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.289670 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/c468ccc6-8384-4a69-ae41-cca18f9233e3-memcached-tls-certs\") pod \"memcached-0\" (UID: \"c468ccc6-8384-4a69-ae41-cca18f9233e3\") " pod="openstack/memcached-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.391457 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c468ccc6-8384-4a69-ae41-cca18f9233e3-combined-ca-bundle\") pod \"memcached-0\" (UID: \"c468ccc6-8384-4a69-ae41-cca18f9233e3\") " pod="openstack/memcached-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.391506 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c468ccc6-8384-4a69-ae41-cca18f9233e3-kolla-config\") pod \"memcached-0\" (UID: \"c468ccc6-8384-4a69-ae41-cca18f9233e3\") " pod="openstack/memcached-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.391525 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/c468ccc6-8384-4a69-ae41-cca18f9233e3-memcached-tls-certs\") pod \"memcached-0\" (UID: \"c468ccc6-8384-4a69-ae41-cca18f9233e3\") " pod="openstack/memcached-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.391560 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c468ccc6-8384-4a69-ae41-cca18f9233e3-config-data\") pod \"memcached-0\" (UID: \"c468ccc6-8384-4a69-ae41-cca18f9233e3\") " pod="openstack/memcached-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.391601 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hqd6k\" (UniqueName: \"kubernetes.io/projected/c468ccc6-8384-4a69-ae41-cca18f9233e3-kube-api-access-hqd6k\") pod \"memcached-0\" (UID: \"c468ccc6-8384-4a69-ae41-cca18f9233e3\") " pod="openstack/memcached-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.393239 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c468ccc6-8384-4a69-ae41-cca18f9233e3-config-data\") pod \"memcached-0\" (UID: \"c468ccc6-8384-4a69-ae41-cca18f9233e3\") " pod="openstack/memcached-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.395207 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/c468ccc6-8384-4a69-ae41-cca18f9233e3-memcached-tls-certs\") pod \"memcached-0\" (UID: \"c468ccc6-8384-4a69-ae41-cca18f9233e3\") " pod="openstack/memcached-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.395547 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c468ccc6-8384-4a69-ae41-cca18f9233e3-kolla-config\") pod \"memcached-0\" (UID: \"c468ccc6-8384-4a69-ae41-cca18f9233e3\") " pod="openstack/memcached-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.398155 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c468ccc6-8384-4a69-ae41-cca18f9233e3-combined-ca-bundle\") pod \"memcached-0\" (UID: \"c468ccc6-8384-4a69-ae41-cca18f9233e3\") " pod="openstack/memcached-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.414199 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hqd6k\" (UniqueName: \"kubernetes.io/projected/c468ccc6-8384-4a69-ae41-cca18f9233e3-kube-api-access-hqd6k\") pod \"memcached-0\" (UID: \"c468ccc6-8384-4a69-ae41-cca18f9233e3\") " pod="openstack/memcached-0" Dec 13 06:47:33 crc kubenswrapper[5048]: I1213 06:47:33.441247 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Dec 13 06:47:35 crc kubenswrapper[5048]: I1213 06:47:35.200079 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Dec 13 06:47:35 crc kubenswrapper[5048]: I1213 06:47:35.201556 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 13 06:47:35 crc kubenswrapper[5048]: I1213 06:47:35.211863 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 13 06:47:35 crc kubenswrapper[5048]: I1213 06:47:35.236517 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-6bph2" Dec 13 06:47:35 crc kubenswrapper[5048]: I1213 06:47:35.320710 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7mlj5\" (UniqueName: \"kubernetes.io/projected/97a8bb68-6317-4a75-bf87-f7d6c6cb0023-kube-api-access-7mlj5\") pod \"kube-state-metrics-0\" (UID: \"97a8bb68-6317-4a75-bf87-f7d6c6cb0023\") " pod="openstack/kube-state-metrics-0" Dec 13 06:47:35 crc kubenswrapper[5048]: I1213 06:47:35.422798 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7mlj5\" (UniqueName: \"kubernetes.io/projected/97a8bb68-6317-4a75-bf87-f7d6c6cb0023-kube-api-access-7mlj5\") pod \"kube-state-metrics-0\" (UID: \"97a8bb68-6317-4a75-bf87-f7d6c6cb0023\") " pod="openstack/kube-state-metrics-0" Dec 13 06:47:35 crc kubenswrapper[5048]: I1213 06:47:35.442240 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7mlj5\" (UniqueName: \"kubernetes.io/projected/97a8bb68-6317-4a75-bf87-f7d6c6cb0023-kube-api-access-7mlj5\") pod \"kube-state-metrics-0\" (UID: \"97a8bb68-6317-4a75-bf87-f7d6c6cb0023\") " pod="openstack/kube-state-metrics-0" Dec 13 06:47:35 crc kubenswrapper[5048]: I1213 06:47:35.554695 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 13 06:47:35 crc kubenswrapper[5048]: I1213 06:47:35.714899 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"e3a0bce1-8848-4ac7-a030-19640b952708","Type":"ContainerStarted","Data":"ff8ddcca404a55d9d44ca4ef95d5b60c4be2b56540a9e152bd26bda198734473"} Dec 13 06:47:35 crc kubenswrapper[5048]: I1213 06:47:35.716133 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"a7bbc535-10f7-44cc-89a6-cbb697149e4a","Type":"ContainerStarted","Data":"eb323ae4c8b80fcb7c3da407b10e37d9ffd08382ef8f5995222bdb508be16a06"} Dec 13 06:47:36 crc kubenswrapper[5048]: I1213 06:47:36.227072 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.474403 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-2sh28"] Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.476943 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-2sh28" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.495452 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.495838 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.496127 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-t8wj8" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.496298 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-2sh28"] Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.512084 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-x5dfv"] Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.513933 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-x5dfv" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.536202 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-x5dfv"] Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.572549 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/a8258a39-dbbb-4672-9d88-22749f0c9563-ovn-controller-tls-certs\") pod \"ovn-controller-2sh28\" (UID: \"a8258a39-dbbb-4672-9d88-22749f0c9563\") " pod="openstack/ovn-controller-2sh28" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.572636 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/a8258a39-dbbb-4672-9d88-22749f0c9563-var-log-ovn\") pod \"ovn-controller-2sh28\" (UID: \"a8258a39-dbbb-4672-9d88-22749f0c9563\") " pod="openstack/ovn-controller-2sh28" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.572665 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/a8258a39-dbbb-4672-9d88-22749f0c9563-var-run\") pod \"ovn-controller-2sh28\" (UID: \"a8258a39-dbbb-4672-9d88-22749f0c9563\") " pod="openstack/ovn-controller-2sh28" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.572716 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gd567\" (UniqueName: \"kubernetes.io/projected/a8258a39-dbbb-4672-9d88-22749f0c9563-kube-api-access-gd567\") pod \"ovn-controller-2sh28\" (UID: \"a8258a39-dbbb-4672-9d88-22749f0c9563\") " pod="openstack/ovn-controller-2sh28" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.572759 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8258a39-dbbb-4672-9d88-22749f0c9563-combined-ca-bundle\") pod \"ovn-controller-2sh28\" (UID: \"a8258a39-dbbb-4672-9d88-22749f0c9563\") " pod="openstack/ovn-controller-2sh28" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.572933 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/a8258a39-dbbb-4672-9d88-22749f0c9563-var-run-ovn\") pod \"ovn-controller-2sh28\" (UID: 
\"a8258a39-dbbb-4672-9d88-22749f0c9563\") " pod="openstack/ovn-controller-2sh28" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.572979 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a8258a39-dbbb-4672-9d88-22749f0c9563-scripts\") pod \"ovn-controller-2sh28\" (UID: \"a8258a39-dbbb-4672-9d88-22749f0c9563\") " pod="openstack/ovn-controller-2sh28" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.674928 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/a8258a39-dbbb-4672-9d88-22749f0c9563-var-log-ovn\") pod \"ovn-controller-2sh28\" (UID: \"a8258a39-dbbb-4672-9d88-22749f0c9563\") " pod="openstack/ovn-controller-2sh28" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.674966 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/a8258a39-dbbb-4672-9d88-22749f0c9563-var-run\") pod \"ovn-controller-2sh28\" (UID: \"a8258a39-dbbb-4672-9d88-22749f0c9563\") " pod="openstack/ovn-controller-2sh28" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.675002 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gd567\" (UniqueName: \"kubernetes.io/projected/a8258a39-dbbb-4672-9d88-22749f0c9563-kube-api-access-gd567\") pod \"ovn-controller-2sh28\" (UID: \"a8258a39-dbbb-4672-9d88-22749f0c9563\") " pod="openstack/ovn-controller-2sh28" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.675042 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/df1e9604-d8b9-4e08-8eb1-9c30b73f6d70-var-log\") pod \"ovn-controller-ovs-x5dfv\" (UID: \"df1e9604-d8b9-4e08-8eb1-9c30b73f6d70\") " pod="openstack/ovn-controller-ovs-x5dfv" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.675072 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cpt9m\" (UniqueName: \"kubernetes.io/projected/df1e9604-d8b9-4e08-8eb1-9c30b73f6d70-kube-api-access-cpt9m\") pod \"ovn-controller-ovs-x5dfv\" (UID: \"df1e9604-d8b9-4e08-8eb1-9c30b73f6d70\") " pod="openstack/ovn-controller-ovs-x5dfv" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.675098 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8258a39-dbbb-4672-9d88-22749f0c9563-combined-ca-bundle\") pod \"ovn-controller-2sh28\" (UID: \"a8258a39-dbbb-4672-9d88-22749f0c9563\") " pod="openstack/ovn-controller-2sh28" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.675139 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/df1e9604-d8b9-4e08-8eb1-9c30b73f6d70-scripts\") pod \"ovn-controller-ovs-x5dfv\" (UID: \"df1e9604-d8b9-4e08-8eb1-9c30b73f6d70\") " pod="openstack/ovn-controller-ovs-x5dfv" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.675161 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a8258a39-dbbb-4672-9d88-22749f0c9563-scripts\") pod \"ovn-controller-2sh28\" (UID: \"a8258a39-dbbb-4672-9d88-22749f0c9563\") " pod="openstack/ovn-controller-2sh28" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 
06:47:38.675178 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/a8258a39-dbbb-4672-9d88-22749f0c9563-var-run-ovn\") pod \"ovn-controller-2sh28\" (UID: \"a8258a39-dbbb-4672-9d88-22749f0c9563\") " pod="openstack/ovn-controller-2sh28" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.675207 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/df1e9604-d8b9-4e08-8eb1-9c30b73f6d70-etc-ovs\") pod \"ovn-controller-ovs-x5dfv\" (UID: \"df1e9604-d8b9-4e08-8eb1-9c30b73f6d70\") " pod="openstack/ovn-controller-ovs-x5dfv" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.675238 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/a8258a39-dbbb-4672-9d88-22749f0c9563-ovn-controller-tls-certs\") pod \"ovn-controller-2sh28\" (UID: \"a8258a39-dbbb-4672-9d88-22749f0c9563\") " pod="openstack/ovn-controller-2sh28" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.675276 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/df1e9604-d8b9-4e08-8eb1-9c30b73f6d70-var-run\") pod \"ovn-controller-ovs-x5dfv\" (UID: \"df1e9604-d8b9-4e08-8eb1-9c30b73f6d70\") " pod="openstack/ovn-controller-ovs-x5dfv" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.675294 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/df1e9604-d8b9-4e08-8eb1-9c30b73f6d70-var-lib\") pod \"ovn-controller-ovs-x5dfv\" (UID: \"df1e9604-d8b9-4e08-8eb1-9c30b73f6d70\") " pod="openstack/ovn-controller-ovs-x5dfv" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.675890 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/a8258a39-dbbb-4672-9d88-22749f0c9563-var-log-ovn\") pod \"ovn-controller-2sh28\" (UID: \"a8258a39-dbbb-4672-9d88-22749f0c9563\") " pod="openstack/ovn-controller-2sh28" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.675983 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/a8258a39-dbbb-4672-9d88-22749f0c9563-var-run\") pod \"ovn-controller-2sh28\" (UID: \"a8258a39-dbbb-4672-9d88-22749f0c9563\") " pod="openstack/ovn-controller-2sh28" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.676039 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/a8258a39-dbbb-4672-9d88-22749f0c9563-var-run-ovn\") pod \"ovn-controller-2sh28\" (UID: \"a8258a39-dbbb-4672-9d88-22749f0c9563\") " pod="openstack/ovn-controller-2sh28" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.684731 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8258a39-dbbb-4672-9d88-22749f0c9563-combined-ca-bundle\") pod \"ovn-controller-2sh28\" (UID: \"a8258a39-dbbb-4672-9d88-22749f0c9563\") " pod="openstack/ovn-controller-2sh28" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.687983 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a8258a39-dbbb-4672-9d88-22749f0c9563-scripts\") pod 
\"ovn-controller-2sh28\" (UID: \"a8258a39-dbbb-4672-9d88-22749f0c9563\") " pod="openstack/ovn-controller-2sh28" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.689494 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/a8258a39-dbbb-4672-9d88-22749f0c9563-ovn-controller-tls-certs\") pod \"ovn-controller-2sh28\" (UID: \"a8258a39-dbbb-4672-9d88-22749f0c9563\") " pod="openstack/ovn-controller-2sh28" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.699118 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gd567\" (UniqueName: \"kubernetes.io/projected/a8258a39-dbbb-4672-9d88-22749f0c9563-kube-api-access-gd567\") pod \"ovn-controller-2sh28\" (UID: \"a8258a39-dbbb-4672-9d88-22749f0c9563\") " pod="openstack/ovn-controller-2sh28" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.777235 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/df1e9604-d8b9-4e08-8eb1-9c30b73f6d70-var-log\") pod \"ovn-controller-ovs-x5dfv\" (UID: \"df1e9604-d8b9-4e08-8eb1-9c30b73f6d70\") " pod="openstack/ovn-controller-ovs-x5dfv" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.777285 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cpt9m\" (UniqueName: \"kubernetes.io/projected/df1e9604-d8b9-4e08-8eb1-9c30b73f6d70-kube-api-access-cpt9m\") pod \"ovn-controller-ovs-x5dfv\" (UID: \"df1e9604-d8b9-4e08-8eb1-9c30b73f6d70\") " pod="openstack/ovn-controller-ovs-x5dfv" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.777347 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/df1e9604-d8b9-4e08-8eb1-9c30b73f6d70-scripts\") pod \"ovn-controller-ovs-x5dfv\" (UID: \"df1e9604-d8b9-4e08-8eb1-9c30b73f6d70\") " pod="openstack/ovn-controller-ovs-x5dfv" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.777375 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/df1e9604-d8b9-4e08-8eb1-9c30b73f6d70-etc-ovs\") pod \"ovn-controller-ovs-x5dfv\" (UID: \"df1e9604-d8b9-4e08-8eb1-9c30b73f6d70\") " pod="openstack/ovn-controller-ovs-x5dfv" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.777424 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/df1e9604-d8b9-4e08-8eb1-9c30b73f6d70-var-run\") pod \"ovn-controller-ovs-x5dfv\" (UID: \"df1e9604-d8b9-4e08-8eb1-9c30b73f6d70\") " pod="openstack/ovn-controller-ovs-x5dfv" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.777470 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/df1e9604-d8b9-4e08-8eb1-9c30b73f6d70-var-lib\") pod \"ovn-controller-ovs-x5dfv\" (UID: \"df1e9604-d8b9-4e08-8eb1-9c30b73f6d70\") " pod="openstack/ovn-controller-ovs-x5dfv" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.777821 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/df1e9604-d8b9-4e08-8eb1-9c30b73f6d70-var-lib\") pod \"ovn-controller-ovs-x5dfv\" (UID: \"df1e9604-d8b9-4e08-8eb1-9c30b73f6d70\") " pod="openstack/ovn-controller-ovs-x5dfv" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.777869 5048 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/df1e9604-d8b9-4e08-8eb1-9c30b73f6d70-var-log\") pod \"ovn-controller-ovs-x5dfv\" (UID: \"df1e9604-d8b9-4e08-8eb1-9c30b73f6d70\") " pod="openstack/ovn-controller-ovs-x5dfv" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.777913 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/df1e9604-d8b9-4e08-8eb1-9c30b73f6d70-etc-ovs\") pod \"ovn-controller-ovs-x5dfv\" (UID: \"df1e9604-d8b9-4e08-8eb1-9c30b73f6d70\") " pod="openstack/ovn-controller-ovs-x5dfv" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.777896 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/df1e9604-d8b9-4e08-8eb1-9c30b73f6d70-var-run\") pod \"ovn-controller-ovs-x5dfv\" (UID: \"df1e9604-d8b9-4e08-8eb1-9c30b73f6d70\") " pod="openstack/ovn-controller-ovs-x5dfv" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.781240 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/df1e9604-d8b9-4e08-8eb1-9c30b73f6d70-scripts\") pod \"ovn-controller-ovs-x5dfv\" (UID: \"df1e9604-d8b9-4e08-8eb1-9c30b73f6d70\") " pod="openstack/ovn-controller-ovs-x5dfv" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.795240 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cpt9m\" (UniqueName: \"kubernetes.io/projected/df1e9604-d8b9-4e08-8eb1-9c30b73f6d70-kube-api-access-cpt9m\") pod \"ovn-controller-ovs-x5dfv\" (UID: \"df1e9604-d8b9-4e08-8eb1-9c30b73f6d70\") " pod="openstack/ovn-controller-ovs-x5dfv" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.818910 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-2sh28" Dec 13 06:47:38 crc kubenswrapper[5048]: I1213 06:47:38.843842 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-x5dfv" Dec 13 06:47:39 crc kubenswrapper[5048]: I1213 06:47:39.378418 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Dec 13 06:47:39 crc kubenswrapper[5048]: I1213 06:47:39.379673 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Dec 13 06:47:39 crc kubenswrapper[5048]: I1213 06:47:39.381710 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Dec 13 06:47:39 crc kubenswrapper[5048]: I1213 06:47:39.381740 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-n9dg9" Dec 13 06:47:39 crc kubenswrapper[5048]: I1213 06:47:39.382731 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Dec 13 06:47:39 crc kubenswrapper[5048]: I1213 06:47:39.382969 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Dec 13 06:47:39 crc kubenswrapper[5048]: I1213 06:47:39.387735 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Dec 13 06:47:39 crc kubenswrapper[5048]: I1213 06:47:39.398602 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Dec 13 06:47:39 crc kubenswrapper[5048]: I1213 06:47:39.490215 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c7d8f14-b731-4408-b506-2dac81b2a0a7-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"8c7d8f14-b731-4408-b506-2dac81b2a0a7\") " pod="openstack/ovsdbserver-nb-0" Dec 13 06:47:39 crc kubenswrapper[5048]: I1213 06:47:39.490283 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c7d8f14-b731-4408-b506-2dac81b2a0a7-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"8c7d8f14-b731-4408-b506-2dac81b2a0a7\") " pod="openstack/ovsdbserver-nb-0" Dec 13 06:47:39 crc kubenswrapper[5048]: I1213 06:47:39.490316 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8c7d8f14-b731-4408-b506-2dac81b2a0a7-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"8c7d8f14-b731-4408-b506-2dac81b2a0a7\") " pod="openstack/ovsdbserver-nb-0" Dec 13 06:47:39 crc kubenswrapper[5048]: I1213 06:47:39.490393 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8c7d8f14-b731-4408-b506-2dac81b2a0a7-config\") pod \"ovsdbserver-nb-0\" (UID: \"8c7d8f14-b731-4408-b506-2dac81b2a0a7\") " pod="openstack/ovsdbserver-nb-0" Dec 13 06:47:39 crc kubenswrapper[5048]: I1213 06:47:39.490417 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r725p\" (UniqueName: \"kubernetes.io/projected/8c7d8f14-b731-4408-b506-2dac81b2a0a7-kube-api-access-r725p\") pod \"ovsdbserver-nb-0\" (UID: \"8c7d8f14-b731-4408-b506-2dac81b2a0a7\") " pod="openstack/ovsdbserver-nb-0" Dec 13 06:47:39 crc kubenswrapper[5048]: I1213 06:47:39.490458 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8c7d8f14-b731-4408-b506-2dac81b2a0a7-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"8c7d8f14-b731-4408-b506-2dac81b2a0a7\") " pod="openstack/ovsdbserver-nb-0" Dec 13 06:47:39 crc kubenswrapper[5048]: I1213 06:47:39.490578 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-nb-0\" (UID: \"8c7d8f14-b731-4408-b506-2dac81b2a0a7\") " pod="openstack/ovsdbserver-nb-0" Dec 13 06:47:39 crc kubenswrapper[5048]: I1213 06:47:39.490638 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c7d8f14-b731-4408-b506-2dac81b2a0a7-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"8c7d8f14-b731-4408-b506-2dac81b2a0a7\") " pod="openstack/ovsdbserver-nb-0" Dec 13 06:47:39 crc kubenswrapper[5048]: I1213 06:47:39.592351 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c7d8f14-b731-4408-b506-2dac81b2a0a7-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"8c7d8f14-b731-4408-b506-2dac81b2a0a7\") " pod="openstack/ovsdbserver-nb-0" Dec 13 06:47:39 crc kubenswrapper[5048]: I1213 06:47:39.592403 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8c7d8f14-b731-4408-b506-2dac81b2a0a7-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"8c7d8f14-b731-4408-b506-2dac81b2a0a7\") " pod="openstack/ovsdbserver-nb-0" Dec 13 06:47:39 crc kubenswrapper[5048]: I1213 06:47:39.592466 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8c7d8f14-b731-4408-b506-2dac81b2a0a7-config\") pod \"ovsdbserver-nb-0\" (UID: \"8c7d8f14-b731-4408-b506-2dac81b2a0a7\") " pod="openstack/ovsdbserver-nb-0" Dec 13 06:47:39 crc kubenswrapper[5048]: I1213 06:47:39.592492 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r725p\" (UniqueName: \"kubernetes.io/projected/8c7d8f14-b731-4408-b506-2dac81b2a0a7-kube-api-access-r725p\") pod \"ovsdbserver-nb-0\" (UID: \"8c7d8f14-b731-4408-b506-2dac81b2a0a7\") " pod="openstack/ovsdbserver-nb-0" Dec 13 06:47:39 crc kubenswrapper[5048]: I1213 06:47:39.592535 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8c7d8f14-b731-4408-b506-2dac81b2a0a7-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"8c7d8f14-b731-4408-b506-2dac81b2a0a7\") " pod="openstack/ovsdbserver-nb-0" Dec 13 06:47:39 crc kubenswrapper[5048]: I1213 06:47:39.592558 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-nb-0\" (UID: \"8c7d8f14-b731-4408-b506-2dac81b2a0a7\") " pod="openstack/ovsdbserver-nb-0" Dec 13 06:47:39 crc kubenswrapper[5048]: I1213 06:47:39.592573 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c7d8f14-b731-4408-b506-2dac81b2a0a7-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"8c7d8f14-b731-4408-b506-2dac81b2a0a7\") " pod="openstack/ovsdbserver-nb-0" Dec 13 06:47:39 crc kubenswrapper[5048]: I1213 06:47:39.592656 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c7d8f14-b731-4408-b506-2dac81b2a0a7-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"8c7d8f14-b731-4408-b506-2dac81b2a0a7\") " pod="openstack/ovsdbserver-nb-0" Dec 13 06:47:39 crc kubenswrapper[5048]: 
I1213 06:47:39.593267 5048 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-nb-0\" (UID: \"8c7d8f14-b731-4408-b506-2dac81b2a0a7\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/ovsdbserver-nb-0" Dec 13 06:47:39 crc kubenswrapper[5048]: I1213 06:47:39.593799 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8c7d8f14-b731-4408-b506-2dac81b2a0a7-config\") pod \"ovsdbserver-nb-0\" (UID: \"8c7d8f14-b731-4408-b506-2dac81b2a0a7\") " pod="openstack/ovsdbserver-nb-0" Dec 13 06:47:39 crc kubenswrapper[5048]: I1213 06:47:39.594039 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8c7d8f14-b731-4408-b506-2dac81b2a0a7-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"8c7d8f14-b731-4408-b506-2dac81b2a0a7\") " pod="openstack/ovsdbserver-nb-0" Dec 13 06:47:39 crc kubenswrapper[5048]: I1213 06:47:39.594207 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8c7d8f14-b731-4408-b506-2dac81b2a0a7-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"8c7d8f14-b731-4408-b506-2dac81b2a0a7\") " pod="openstack/ovsdbserver-nb-0" Dec 13 06:47:39 crc kubenswrapper[5048]: I1213 06:47:39.597246 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c7d8f14-b731-4408-b506-2dac81b2a0a7-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"8c7d8f14-b731-4408-b506-2dac81b2a0a7\") " pod="openstack/ovsdbserver-nb-0" Dec 13 06:47:39 crc kubenswrapper[5048]: I1213 06:47:39.598020 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c7d8f14-b731-4408-b506-2dac81b2a0a7-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"8c7d8f14-b731-4408-b506-2dac81b2a0a7\") " pod="openstack/ovsdbserver-nb-0" Dec 13 06:47:39 crc kubenswrapper[5048]: I1213 06:47:39.606365 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c7d8f14-b731-4408-b506-2dac81b2a0a7-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"8c7d8f14-b731-4408-b506-2dac81b2a0a7\") " pod="openstack/ovsdbserver-nb-0" Dec 13 06:47:39 crc kubenswrapper[5048]: I1213 06:47:39.608848 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r725p\" (UniqueName: \"kubernetes.io/projected/8c7d8f14-b731-4408-b506-2dac81b2a0a7-kube-api-access-r725p\") pod \"ovsdbserver-nb-0\" (UID: \"8c7d8f14-b731-4408-b506-2dac81b2a0a7\") " pod="openstack/ovsdbserver-nb-0" Dec 13 06:47:39 crc kubenswrapper[5048]: I1213 06:47:39.617287 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-nb-0\" (UID: \"8c7d8f14-b731-4408-b506-2dac81b2a0a7\") " pod="openstack/ovsdbserver-nb-0" Dec 13 06:47:39 crc kubenswrapper[5048]: I1213 06:47:39.704354 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Dec 13 06:47:40 crc kubenswrapper[5048]: W1213 06:47:40.961302 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbe46506c_41d0_4b9f_92bd_f34eb6d6a1aa.slice/crio-d96f2c2a7758a93c03432420101ec9f05ec900a8e5d881efce9b85601c009016 WatchSource:0}: Error finding container d96f2c2a7758a93c03432420101ec9f05ec900a8e5d881efce9b85601c009016: Status 404 returned error can't find the container with id d96f2c2a7758a93c03432420101ec9f05ec900a8e5d881efce9b85601c009016 Dec 13 06:47:41 crc kubenswrapper[5048]: I1213 06:47:41.763457 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"be46506c-41d0-4b9f-92bd-f34eb6d6a1aa","Type":"ContainerStarted","Data":"d96f2c2a7758a93c03432420101ec9f05ec900a8e5d881efce9b85601c009016"} Dec 13 06:47:42 crc kubenswrapper[5048]: I1213 06:47:42.271696 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Dec 13 06:47:42 crc kubenswrapper[5048]: I1213 06:47:42.273223 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Dec 13 06:47:42 crc kubenswrapper[5048]: I1213 06:47:42.275704 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Dec 13 06:47:42 crc kubenswrapper[5048]: I1213 06:47:42.275882 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Dec 13 06:47:42 crc kubenswrapper[5048]: I1213 06:47:42.277258 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Dec 13 06:47:42 crc kubenswrapper[5048]: I1213 06:47:42.278850 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-vx2rw" Dec 13 06:47:42 crc kubenswrapper[5048]: I1213 06:47:42.287311 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Dec 13 06:47:42 crc kubenswrapper[5048]: I1213 06:47:42.452010 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/932081d6-9a70-45ec-8738-f0b1265a2a84-config\") pod \"ovsdbserver-sb-0\" (UID: \"932081d6-9a70-45ec-8738-f0b1265a2a84\") " pod="openstack/ovsdbserver-sb-0" Dec 13 06:47:42 crc kubenswrapper[5048]: I1213 06:47:42.452317 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/932081d6-9a70-45ec-8738-f0b1265a2a84-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"932081d6-9a70-45ec-8738-f0b1265a2a84\") " pod="openstack/ovsdbserver-sb-0" Dec 13 06:47:42 crc kubenswrapper[5048]: I1213 06:47:42.452450 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-sb-0\" (UID: \"932081d6-9a70-45ec-8738-f0b1265a2a84\") " pod="openstack/ovsdbserver-sb-0" Dec 13 06:47:42 crc kubenswrapper[5048]: I1213 06:47:42.452557 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v7f8j\" (UniqueName: \"kubernetes.io/projected/932081d6-9a70-45ec-8738-f0b1265a2a84-kube-api-access-v7f8j\") pod \"ovsdbserver-sb-0\" (UID: 
\"932081d6-9a70-45ec-8738-f0b1265a2a84\") " pod="openstack/ovsdbserver-sb-0" Dec 13 06:47:42 crc kubenswrapper[5048]: I1213 06:47:42.452636 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/932081d6-9a70-45ec-8738-f0b1265a2a84-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"932081d6-9a70-45ec-8738-f0b1265a2a84\") " pod="openstack/ovsdbserver-sb-0" Dec 13 06:47:42 crc kubenswrapper[5048]: I1213 06:47:42.452737 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/932081d6-9a70-45ec-8738-f0b1265a2a84-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"932081d6-9a70-45ec-8738-f0b1265a2a84\") " pod="openstack/ovsdbserver-sb-0" Dec 13 06:47:42 crc kubenswrapper[5048]: I1213 06:47:42.452869 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/932081d6-9a70-45ec-8738-f0b1265a2a84-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"932081d6-9a70-45ec-8738-f0b1265a2a84\") " pod="openstack/ovsdbserver-sb-0" Dec 13 06:47:42 crc kubenswrapper[5048]: I1213 06:47:42.452946 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/932081d6-9a70-45ec-8738-f0b1265a2a84-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"932081d6-9a70-45ec-8738-f0b1265a2a84\") " pod="openstack/ovsdbserver-sb-0" Dec 13 06:47:42 crc kubenswrapper[5048]: I1213 06:47:42.554093 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/932081d6-9a70-45ec-8738-f0b1265a2a84-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"932081d6-9a70-45ec-8738-f0b1265a2a84\") " pod="openstack/ovsdbserver-sb-0" Dec 13 06:47:42 crc kubenswrapper[5048]: I1213 06:47:42.554150 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/932081d6-9a70-45ec-8738-f0b1265a2a84-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"932081d6-9a70-45ec-8738-f0b1265a2a84\") " pod="openstack/ovsdbserver-sb-0" Dec 13 06:47:42 crc kubenswrapper[5048]: I1213 06:47:42.554195 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/932081d6-9a70-45ec-8738-f0b1265a2a84-config\") pod \"ovsdbserver-sb-0\" (UID: \"932081d6-9a70-45ec-8738-f0b1265a2a84\") " pod="openstack/ovsdbserver-sb-0" Dec 13 06:47:42 crc kubenswrapper[5048]: I1213 06:47:42.554228 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/932081d6-9a70-45ec-8738-f0b1265a2a84-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"932081d6-9a70-45ec-8738-f0b1265a2a84\") " pod="openstack/ovsdbserver-sb-0" Dec 13 06:47:42 crc kubenswrapper[5048]: I1213 06:47:42.554281 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-sb-0\" (UID: \"932081d6-9a70-45ec-8738-f0b1265a2a84\") " pod="openstack/ovsdbserver-sb-0" Dec 13 06:47:42 crc kubenswrapper[5048]: I1213 06:47:42.554313 5048 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"kube-api-access-v7f8j\" (UniqueName: \"kubernetes.io/projected/932081d6-9a70-45ec-8738-f0b1265a2a84-kube-api-access-v7f8j\") pod \"ovsdbserver-sb-0\" (UID: \"932081d6-9a70-45ec-8738-f0b1265a2a84\") " pod="openstack/ovsdbserver-sb-0" Dec 13 06:47:42 crc kubenswrapper[5048]: I1213 06:47:42.554335 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/932081d6-9a70-45ec-8738-f0b1265a2a84-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"932081d6-9a70-45ec-8738-f0b1265a2a84\") " pod="openstack/ovsdbserver-sb-0" Dec 13 06:47:42 crc kubenswrapper[5048]: I1213 06:47:42.554378 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/932081d6-9a70-45ec-8738-f0b1265a2a84-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"932081d6-9a70-45ec-8738-f0b1265a2a84\") " pod="openstack/ovsdbserver-sb-0" Dec 13 06:47:42 crc kubenswrapper[5048]: I1213 06:47:42.554651 5048 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-sb-0\" (UID: \"932081d6-9a70-45ec-8738-f0b1265a2a84\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/ovsdbserver-sb-0" Dec 13 06:47:42 crc kubenswrapper[5048]: I1213 06:47:42.555096 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/932081d6-9a70-45ec-8738-f0b1265a2a84-config\") pod \"ovsdbserver-sb-0\" (UID: \"932081d6-9a70-45ec-8738-f0b1265a2a84\") " pod="openstack/ovsdbserver-sb-0" Dec 13 06:47:42 crc kubenswrapper[5048]: I1213 06:47:42.555642 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/932081d6-9a70-45ec-8738-f0b1265a2a84-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"932081d6-9a70-45ec-8738-f0b1265a2a84\") " pod="openstack/ovsdbserver-sb-0" Dec 13 06:47:42 crc kubenswrapper[5048]: I1213 06:47:42.556101 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/932081d6-9a70-45ec-8738-f0b1265a2a84-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"932081d6-9a70-45ec-8738-f0b1265a2a84\") " pod="openstack/ovsdbserver-sb-0" Dec 13 06:47:42 crc kubenswrapper[5048]: I1213 06:47:42.560499 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/932081d6-9a70-45ec-8738-f0b1265a2a84-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"932081d6-9a70-45ec-8738-f0b1265a2a84\") " pod="openstack/ovsdbserver-sb-0" Dec 13 06:47:42 crc kubenswrapper[5048]: I1213 06:47:42.560573 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/932081d6-9a70-45ec-8738-f0b1265a2a84-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"932081d6-9a70-45ec-8738-f0b1265a2a84\") " pod="openstack/ovsdbserver-sb-0" Dec 13 06:47:42 crc kubenswrapper[5048]: I1213 06:47:42.560955 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/932081d6-9a70-45ec-8738-f0b1265a2a84-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"932081d6-9a70-45ec-8738-f0b1265a2a84\") " pod="openstack/ovsdbserver-sb-0" Dec 13 06:47:42 crc kubenswrapper[5048]: 
I1213 06:47:42.573424 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-sb-0\" (UID: \"932081d6-9a70-45ec-8738-f0b1265a2a84\") " pod="openstack/ovsdbserver-sb-0" Dec 13 06:47:42 crc kubenswrapper[5048]: I1213 06:47:42.578254 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v7f8j\" (UniqueName: \"kubernetes.io/projected/932081d6-9a70-45ec-8738-f0b1265a2a84-kube-api-access-v7f8j\") pod \"ovsdbserver-sb-0\" (UID: \"932081d6-9a70-45ec-8738-f0b1265a2a84\") " pod="openstack/ovsdbserver-sb-0" Dec 13 06:47:42 crc kubenswrapper[5048]: I1213 06:47:42.603284 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Dec 13 06:47:53 crc kubenswrapper[5048]: I1213 06:47:53.775453 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Dec 13 06:47:54 crc kubenswrapper[5048]: E1213 06:47:54.842581 5048 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Dec 13 06:47:54 crc kubenswrapper[5048]: E1213 06:47:54.842774 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gpj7x,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
dnsmasq-dns-666b6646f7-67pvp_openstack(856a2a68-3593-4af9-8285-2f80d731f123): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 13 06:47:54 crc kubenswrapper[5048]: E1213 06:47:54.844246 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-666b6646f7-67pvp" podUID="856a2a68-3593-4af9-8285-2f80d731f123" Dec 13 06:47:54 crc kubenswrapper[5048]: E1213 06:47:54.867463 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-666b6646f7-67pvp" podUID="856a2a68-3593-4af9-8285-2f80d731f123" Dec 13 06:47:54 crc kubenswrapper[5048]: E1213 06:47:54.910807 5048 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Dec 13 06:47:54 crc kubenswrapper[5048]: E1213 06:47:54.910969 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-cnjnl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-4bfsg_openstack(bcdd7fc3-c63b-4178-a6c7-906ec624fd78): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 13 06:47:54 crc kubenswrapper[5048]: E1213 06:47:54.912368 5048 pod_workers.go:1301] 
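The failure path above is kubelet's standard image-pull handling: the CRI PullImage call fails (here cancelled while copying the image config), kuberuntime_manager records the init container start failure as ErrImagePull, pod_workers skips the sync, and later attempts surface as ImagePullBackOff ("Back-off pulling image ..."). The back-off grows exponentially; the sketch below uses the commonly cited kubelet defaults of a 10s initial delay, doubling per failure, capped at 5 minutes — treat those constants as assumptions, since they are not visible in this log.

    package main

    import (
        "fmt"
        "time"
    )

    // pullBackoff returns the delay before retry n of a failing image pull,
    // using assumed defaults (10s base, x2 growth, 5m cap); kubelet's real
    // implementation lives in its flowcontrol backoff helpers.
    func pullBackoff(n int) time.Duration {
        d := 10 * time.Second
        for i := 0; i < n; i++ {
            d *= 2
            if d >= 5*time.Minute {
                return 5 * time.Minute
            }
        }
        return d
    }

    func main() {
        for n := 0; n < 6; n++ {
            fmt.Printf("retry %d after %v\n", n, pullBackoff(n)) // 10s 20s 40s 1m20s 2m40s 5m0s
        }
    }

This is why the log shows ErrImagePull once per actual pull attempt but repeated "Error syncing pod, skipping ... ImagePullBackOff" entries in between: syncs during the back-off window fail fast without contacting the registry.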
"Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-4bfsg" podUID="bcdd7fc3-c63b-4178-a6c7-906ec624fd78" Dec 13 06:47:55 crc kubenswrapper[5048]: I1213 06:47:55.236792 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Dec 13 06:47:55 crc kubenswrapper[5048]: E1213 06:47:55.968005 5048 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Dec 13 06:47:55 crc kubenswrapper[5048]: E1213 06:47:55.968454 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fbhlw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-0_openstack(a7bbc535-10f7-44cc-89a6-cbb697149e4a): ErrImagePull: rpc error: code = Canceled desc = copying 
config: context canceled" logger="UnhandledError" Dec 13 06:47:55 crc kubenswrapper[5048]: E1213 06:47:55.970199 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-0" podUID="a7bbc535-10f7-44cc-89a6-cbb697149e4a" Dec 13 06:47:55 crc kubenswrapper[5048]: E1213 06:47:55.982249 5048 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Dec 13 06:47:55 crc kubenswrapper[5048]: E1213 06:47:55.982415 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-zdlfb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cell1-server-0_openstack(e3a0bce1-8848-4ac7-a030-19640b952708): ErrImagePull: rpc error: code = Canceled desc = copying 
config: context canceled" logger="UnhandledError" Dec 13 06:47:55 crc kubenswrapper[5048]: E1213 06:47:55.983621 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-cell1-server-0" podUID="e3a0bce1-8848-4ac7-a030-19640b952708" Dec 13 06:47:56 crc kubenswrapper[5048]: W1213 06:47:56.007635 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda5870a17_c845_46b8_a25c_8f8822a93cb8.slice/crio-046451f5491d4f772e7a6d579bbf6f0bb91749e507075abc0a017031de1f8aa1 WatchSource:0}: Error finding container 046451f5491d4f772e7a6d579bbf6f0bb91749e507075abc0a017031de1f8aa1: Status 404 returned error can't find the container with id 046451f5491d4f772e7a6d579bbf6f0bb91749e507075abc0a017031de1f8aa1 Dec 13 06:47:56 crc kubenswrapper[5048]: E1213 06:47:56.149577 5048 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Dec 13 06:47:56 crc kubenswrapper[5048]: E1213 06:47:56.149784 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7b4w9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-xdcwb_openstack(ad32555d-5b77-4554-92da-733514b660d0): ErrImagePull: rpc 
error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Dec 13 06:47:56 crc kubenswrapper[5048]: E1213 06:47:56.151032 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-xdcwb" podUID="ad32555d-5b77-4554-92da-733514b660d0"
Dec 13 06:47:56 crc kubenswrapper[5048]: E1213 06:47:56.151903 5048 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified"
Dec 13 06:47:56 crc kubenswrapper[5048]: E1213 06:47:56.152125 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2sl8x,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-57d769cc4f-fvvwg_openstack(e70bef65-3e49-4e8c-8311-1c490a03fbe0): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Dec 13 06:47:56 crc kubenswrapper[5048]: E1213 06:47:56.154926 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-57d769cc4f-fvvwg" podUID="e70bef65-3e49-4e8c-8311-1c490a03fbe0"
Dec 13 06:47:56 crc kubenswrapper[5048]: I1213 06:47:56.417354 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Dec 13 06:47:56 crc kubenswrapper[5048]: I1213 06:47:56.876855 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"a5870a17-c845-46b8-a25c-8f8822a93cb8","Type":"ContainerStarted","Data":"046451f5491d4f772e7a6d579bbf6f0bb91749e507075abc0a017031de1f8aa1"}
Dec 13 06:47:56 crc kubenswrapper[5048]: I1213 06:47:56.877811 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"c468ccc6-8384-4a69-ae41-cca18f9233e3","Type":"ContainerStarted","Data":"1d72e68445884498e715444c9e7711a841f473115cc7b2b561e41c96390bf143"}
Dec 13 06:47:56 crc kubenswrapper[5048]: E1213 06:47:56.879497 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-server-0" podUID="a7bbc535-10f7-44cc-89a6-cbb697149e4a"
Dec 13 06:47:56 crc kubenswrapper[5048]: E1213 06:47:56.879772 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-cell1-server-0" podUID="e3a0bce1-8848-4ac7-a030-19640b952708"
Dec 13 06:47:56 crc kubenswrapper[5048]: E1213 06:47:56.879912 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-57d769cc4f-fvvwg" podUID="e70bef65-3e49-4e8c-8311-1c490a03fbe0"
Dec 13 06:47:58 crc kubenswrapper[5048]: I1213 06:47:58.540929 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-4bfsg"
Dec 13 06:47:58 crc kubenswrapper[5048]: I1213 06:47:58.551881 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-xdcwb"
Dec 13 06:47:58 crc kubenswrapper[5048]: I1213 06:47:58.641567 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ad32555d-5b77-4554-92da-733514b660d0-dns-svc\") pod \"ad32555d-5b77-4554-92da-733514b660d0\" (UID: \"ad32555d-5b77-4554-92da-733514b660d0\") "
Dec 13 06:47:58 crc kubenswrapper[5048]: I1213 06:47:58.641712 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ad32555d-5b77-4554-92da-733514b660d0-config\") pod \"ad32555d-5b77-4554-92da-733514b660d0\" (UID: \"ad32555d-5b77-4554-92da-733514b660d0\") "
Dec 13 06:47:58 crc kubenswrapper[5048]: I1213 06:47:58.641755 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7b4w9\" (UniqueName: \"kubernetes.io/projected/ad32555d-5b77-4554-92da-733514b660d0-kube-api-access-7b4w9\") pod \"ad32555d-5b77-4554-92da-733514b660d0\" (UID: \"ad32555d-5b77-4554-92da-733514b660d0\") "
Dec 13 06:47:58 crc kubenswrapper[5048]: I1213 06:47:58.641792 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bcdd7fc3-c63b-4178-a6c7-906ec624fd78-config\") pod \"bcdd7fc3-c63b-4178-a6c7-906ec624fd78\" (UID: \"bcdd7fc3-c63b-4178-a6c7-906ec624fd78\") "
Dec 13 06:47:58 crc kubenswrapper[5048]: I1213 06:47:58.642076 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cnjnl\" (UniqueName: \"kubernetes.io/projected/bcdd7fc3-c63b-4178-a6c7-906ec624fd78-kube-api-access-cnjnl\") pod \"bcdd7fc3-c63b-4178-a6c7-906ec624fd78\" (UID: \"bcdd7fc3-c63b-4178-a6c7-906ec624fd78\") "
Dec 13 06:47:58 crc kubenswrapper[5048]: I1213 06:47:58.642912 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ad32555d-5b77-4554-92da-733514b660d0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ad32555d-5b77-4554-92da-733514b660d0" (UID: "ad32555d-5b77-4554-92da-733514b660d0"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 13 06:47:58 crc kubenswrapper[5048]: I1213 06:47:58.642937 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bcdd7fc3-c63b-4178-a6c7-906ec624fd78-config" (OuterVolumeSpecName: "config") pod "bcdd7fc3-c63b-4178-a6c7-906ec624fd78" (UID: "bcdd7fc3-c63b-4178-a6c7-906ec624fd78"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 13 06:47:58 crc kubenswrapper[5048]: I1213 06:47:58.643132 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ad32555d-5b77-4554-92da-733514b660d0-config" (OuterVolumeSpecName: "config") pod "ad32555d-5b77-4554-92da-733514b660d0" (UID: "ad32555d-5b77-4554-92da-733514b660d0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 13 06:47:58 crc kubenswrapper[5048]: I1213 06:47:58.643569 5048 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ad32555d-5b77-4554-92da-733514b660d0-dns-svc\") on node \"crc\" DevicePath \"\""
Dec 13 06:47:58 crc kubenswrapper[5048]: I1213 06:47:58.643596 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ad32555d-5b77-4554-92da-733514b660d0-config\") on node \"crc\" DevicePath \"\""
Dec 13 06:47:58 crc kubenswrapper[5048]: I1213 06:47:58.643610 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bcdd7fc3-c63b-4178-a6c7-906ec624fd78-config\") on node \"crc\" DevicePath \"\""
Dec 13 06:47:58 crc kubenswrapper[5048]: I1213 06:47:58.646150 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bcdd7fc3-c63b-4178-a6c7-906ec624fd78-kube-api-access-cnjnl" (OuterVolumeSpecName: "kube-api-access-cnjnl") pod "bcdd7fc3-c63b-4178-a6c7-906ec624fd78" (UID: "bcdd7fc3-c63b-4178-a6c7-906ec624fd78"). InnerVolumeSpecName "kube-api-access-cnjnl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:47:58 crc kubenswrapper[5048]: I1213 06:47:58.647903 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad32555d-5b77-4554-92da-733514b660d0-kube-api-access-7b4w9" (OuterVolumeSpecName: "kube-api-access-7b4w9") pod "ad32555d-5b77-4554-92da-733514b660d0" (UID: "ad32555d-5b77-4554-92da-733514b660d0"). InnerVolumeSpecName "kube-api-access-7b4w9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:47:58 crc kubenswrapper[5048]: I1213 06:47:58.745769 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7b4w9\" (UniqueName: \"kubernetes.io/projected/ad32555d-5b77-4554-92da-733514b660d0-kube-api-access-7b4w9\") on node \"crc\" DevicePath \"\""
Dec 13 06:47:58 crc kubenswrapper[5048]: I1213 06:47:58.745806 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cnjnl\" (UniqueName: \"kubernetes.io/projected/bcdd7fc3-c63b-4178-a6c7-906ec624fd78-kube-api-access-cnjnl\") on node \"crc\" DevicePath \"\""
Dec 13 06:47:58 crc kubenswrapper[5048]: I1213 06:47:58.893484 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-4bfsg" event={"ID":"bcdd7fc3-c63b-4178-a6c7-906ec624fd78","Type":"ContainerDied","Data":"99f930b200e6008c11d334b5a8a591168c31ea8602f154e5f9173d64a5880a7b"}
Dec 13 06:47:58 crc kubenswrapper[5048]: I1213 06:47:58.893568 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-4bfsg"
Dec 13 06:47:58 crc kubenswrapper[5048]: I1213 06:47:58.897103 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"a5870a17-c845-46b8-a25c-8f8822a93cb8","Type":"ContainerStarted","Data":"b2ca70b368774899050c6fa4f0f329d7bb9028048ccc1a30d75fdf3f209eecac"}
Dec 13 06:47:58 crc kubenswrapper[5048]: I1213 06:47:58.901608 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"be46506c-41d0-4b9f-92bd-f34eb6d6a1aa","Type":"ContainerStarted","Data":"d17c7df73cb8beb3e19cfbedab9ed48efc9712f041ac5db6a695139a7c33a40c"}
Dec 13 06:47:58 crc kubenswrapper[5048]: I1213 06:47:58.908503 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-xdcwb" event={"ID":"ad32555d-5b77-4554-92da-733514b660d0","Type":"ContainerDied","Data":"74ec75393f16942dff40387838d8c326fc5e952abc768ed8348a2e9eb26677f8"}
Dec 13 06:47:58 crc kubenswrapper[5048]: I1213 06:47:58.908606 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-xdcwb"
Dec 13 06:47:58 crc kubenswrapper[5048]: I1213 06:47:58.912889 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"97a8bb68-6317-4a75-bf87-f7d6c6cb0023","Type":"ContainerStarted","Data":"48fab155b42031a7f20ac61fc1eb8c77b442ac3fd7840f80718b9e14204f7bb2"}
Dec 13 06:47:58 crc kubenswrapper[5048]: I1213 06:47:58.961273 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-2sh28"]
Dec 13 06:47:58 crc kubenswrapper[5048]: I1213 06:47:58.996857 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-4bfsg"]
Dec 13 06:47:59 crc kubenswrapper[5048]: I1213 06:47:59.006846 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-4bfsg"]
Dec 13 06:47:59 crc kubenswrapper[5048]: I1213 06:47:59.033334 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-xdcwb"]
Dec 13 06:47:59 crc kubenswrapper[5048]: I1213 06:47:59.047947 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-xdcwb"]
Dec 13 06:47:59 crc kubenswrapper[5048]: I1213 06:47:59.077643 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"]
Dec 13 06:47:59 crc kubenswrapper[5048]: W1213 06:47:59.090565 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod932081d6_9a70_45ec_8738_f0b1265a2a84.slice/crio-7b68779db2f1d6aa19bf16eace3d8d50915e81ff11b43b0f6e90bf3864df7487 WatchSource:0}: Error finding container 7b68779db2f1d6aa19bf16eace3d8d50915e81ff11b43b0f6e90bf3864df7487: Status 404 returned error can't find the container with id 7b68779db2f1d6aa19bf16eace3d8d50915e81ff11b43b0f6e90bf3864df7487
Dec 13 06:47:59 crc kubenswrapper[5048]: I1213 06:47:59.167619 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"]
Dec 13 06:47:59 crc kubenswrapper[5048]: I1213 06:47:59.923682 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-2sh28" event={"ID":"a8258a39-dbbb-4672-9d88-22749f0c9563","Type":"ContainerStarted","Data":"61347f002279053d4d456dd1c6c67ca7fe48117a058a98d34eee3e416402f874"}
Dec 13 06:47:59 crc kubenswrapper[5048]: I1213 06:47:59.925404 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"932081d6-9a70-45ec-8738-f0b1265a2a84","Type":"ContainerStarted","Data":"7b68779db2f1d6aa19bf16eace3d8d50915e81ff11b43b0f6e90bf3864df7487"}
Dec 13 06:48:00 crc kubenswrapper[5048]: I1213 06:48:00.066390 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-x5dfv"]
Dec 13 06:48:00 crc kubenswrapper[5048]: W1213 06:48:00.072775 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddf1e9604_d8b9_4e08_8eb1_9c30b73f6d70.slice/crio-7fea16f0c5eea857225b27b0bd9165495194633939c86e30e7c157abbd46ec9c WatchSource:0}: Error finding container 7fea16f0c5eea857225b27b0bd9165495194633939c86e30e7c157abbd46ec9c: Status 404 returned error can't find the container with id 7fea16f0c5eea857225b27b0bd9165495194633939c86e30e7c157abbd46ec9c
Dec 13 06:48:00 crc kubenswrapper[5048]: I1213 06:48:00.589546 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad32555d-5b77-4554-92da-733514b660d0" path="/var/lib/kubelet/pods/ad32555d-5b77-4554-92da-733514b660d0/volumes"
Dec 13 06:48:00 crc kubenswrapper[5048]: I1213 06:48:00.590345 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bcdd7fc3-c63b-4178-a6c7-906ec624fd78" path="/var/lib/kubelet/pods/bcdd7fc3-c63b-4178-a6c7-906ec624fd78/volumes"
Dec 13 06:48:00 crc kubenswrapper[5048]: I1213 06:48:00.933597 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"8c7d8f14-b731-4408-b506-2dac81b2a0a7","Type":"ContainerStarted","Data":"9af7abc8a15f2bbb7ce6e212784493c6bf8034225bfdf83c9aaf62626e5981d1"}
Dec 13 06:48:00 crc kubenswrapper[5048]: I1213 06:48:00.934925 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-x5dfv" event={"ID":"df1e9604-d8b9-4e08-8eb1-9c30b73f6d70","Type":"ContainerStarted","Data":"7fea16f0c5eea857225b27b0bd9165495194633939c86e30e7c157abbd46ec9c"}
Dec 13 06:48:04 crc kubenswrapper[5048]: I1213 06:48:04.970655 5048 generic.go:334] "Generic (PLEG): container finished" podID="a5870a17-c845-46b8-a25c-8f8822a93cb8" containerID="b2ca70b368774899050c6fa4f0f329d7bb9028048ccc1a30d75fdf3f209eecac" exitCode=0
Dec 13 06:48:04 crc kubenswrapper[5048]: I1213 06:48:04.970894 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"a5870a17-c845-46b8-a25c-8f8822a93cb8","Type":"ContainerDied","Data":"b2ca70b368774899050c6fa4f0f329d7bb9028048ccc1a30d75fdf3f209eecac"}
Dec 13 06:48:04 crc kubenswrapper[5048]: I1213 06:48:04.975956 5048 generic.go:334] "Generic (PLEG): container finished" podID="be46506c-41d0-4b9f-92bd-f34eb6d6a1aa" containerID="d17c7df73cb8beb3e19cfbedab9ed48efc9712f041ac5db6a695139a7c33a40c" exitCode=0
Dec 13 06:48:04 crc kubenswrapper[5048]: I1213 06:48:04.976076 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"be46506c-41d0-4b9f-92bd-f34eb6d6a1aa","Type":"ContainerDied","Data":"d17c7df73cb8beb3e19cfbedab9ed48efc9712f041ac5db6a695139a7c33a40c"}
Dec 13 06:48:05 crc kubenswrapper[5048]: I1213 06:48:05.990047 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"c468ccc6-8384-4a69-ae41-cca18f9233e3","Type":"ContainerStarted","Data":"0ba042d5c2f197b8def6cea46e52b2f76ea01fedd96610e7db1e9d39eb2e485e"}
Dec 13 06:48:05 crc kubenswrapper[5048]: I1213 06:48:05.990698 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0"
Dec 13 06:48:06 crc kubenswrapper[5048]: I1213 06:48:06.017610 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=26.314524592 podStartE2EDuration="33.017589293s" podCreationTimestamp="2025-12-13 06:47:33 +0000 UTC" firstStartedPulling="2025-12-13 06:47:56.020454172 +0000 UTC m=+1109.887048753" lastFinishedPulling="2025-12-13 06:48:02.723518873 +0000 UTC m=+1116.590113454" observedRunningTime="2025-12-13 06:48:06.008958809 +0000 UTC m=+1119.875553390" watchObservedRunningTime="2025-12-13 06:48:06.017589293 +0000 UTC m=+1119.884183874"
Dec 13 06:48:07 crc kubenswrapper[5048]: I1213 06:48:07.000466 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-2sh28" event={"ID":"a8258a39-dbbb-4672-9d88-22749f0c9563","Type":"ContainerStarted","Data":"1399e6dc159adf18ca442120493a7534b107b4e8b793f8fc4ec910e54e770654"}
Dec 13 06:48:07 crc kubenswrapper[5048]: I1213 06:48:07.006092 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-2sh28"
Dec 13 06:48:07 crc kubenswrapper[5048]: I1213 06:48:07.008818 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"97a8bb68-6317-4a75-bf87-f7d6c6cb0023","Type":"ContainerStarted","Data":"b21d600cfe018fabe4afb069145ecc9205ae4c9d25c56bde4b7f4f3dd6e059c2"}
Dec 13 06:48:07 crc kubenswrapper[5048]: I1213 06:48:07.011340 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0"
Dec 13 06:48:07 crc kubenswrapper[5048]: I1213 06:48:07.021587 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"8c7d8f14-b731-4408-b506-2dac81b2a0a7","Type":"ContainerStarted","Data":"d456a0d5a6cdd1810613393ff309197b0feb4769f968dc1b1389d7be2fac43f2"}
Dec 13 06:48:07 crc kubenswrapper[5048]: I1213 06:48:07.021770 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"932081d6-9a70-45ec-8738-f0b1265a2a84","Type":"ContainerStarted","Data":"e9a0422237dc2ca72b699418ac14a976e51c7c8de4548be383f220b0f93013c0"}
Dec 13 06:48:07 crc kubenswrapper[5048]: I1213 06:48:07.024602 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"a5870a17-c845-46b8-a25c-8f8822a93cb8","Type":"ContainerStarted","Data":"00652e321aa2018f8db2bb6f4b5d5fb28eff2320f95633e0883b41af9c3b16b5"}
Dec 13 06:48:07 crc kubenswrapper[5048]: I1213 06:48:07.029731 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"be46506c-41d0-4b9f-92bd-f34eb6d6a1aa","Type":"ContainerStarted","Data":"bf4f93657322f55150cfe2957a9ad2529498d173311fed3b54c2379ec07445d6"}
Dec 13 06:48:07 crc kubenswrapper[5048]: I1213 06:48:07.031399 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-2sh28" podStartSLOduration=22.327284086 podStartE2EDuration="29.031387764s" podCreationTimestamp="2025-12-13 06:47:38 +0000 UTC" firstStartedPulling="2025-12-13 06:47:58.976163828 +0000 UTC m=+1112.842758409" lastFinishedPulling="2025-12-13 06:48:05.680267496 +0000 UTC m=+1119.546862087" observedRunningTime="2025-12-13 06:48:07.023638474 +0000 UTC m=+1120.890233075" watchObservedRunningTime="2025-12-13 06:48:07.031387764 +0000 UTC m=+1120.897982345"
Dec 13 06:48:07 crc kubenswrapper[5048]: I1213 06:48:07.032801 5048 generic.go:334] "Generic (PLEG): container finished" podID="856a2a68-3593-4af9-8285-2f80d731f123" containerID="3cf9e3657056b6f4f854217eec5225319ee6ecd3981719478c9f928dff936e2a" exitCode=0
Dec 13 06:48:07 crc kubenswrapper[5048]: I1213 06:48:07.032844 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-67pvp" event={"ID":"856a2a68-3593-4af9-8285-2f80d731f123","Type":"ContainerDied","Data":"3cf9e3657056b6f4f854217eec5225319ee6ecd3981719478c9f928dff936e2a"}
Dec 13 06:48:07 crc kubenswrapper[5048]: I1213 06:48:07.036030 5048 generic.go:334] "Generic (PLEG): container finished" podID="df1e9604-d8b9-4e08-8eb1-9c30b73f6d70" containerID="b5354cfbfa2262a90b5beec77c110a6277fa57273d4f22d1534a53a0c6b23d7a" exitCode=0
Dec 13 06:48:07 crc kubenswrapper[5048]: I1213 06:48:07.036113 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-x5dfv" event={"ID":"df1e9604-d8b9-4e08-8eb1-9c30b73f6d70","Type":"ContainerDied","Data":"b5354cfbfa2262a90b5beec77c110a6277fa57273d4f22d1534a53a0c6b23d7a"}
Dec 13 06:48:07 crc kubenswrapper[5048]: I1213 06:48:07.053414 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=24.829921434 podStartE2EDuration="32.053393591s" podCreationTimestamp="2025-12-13 06:47:35 +0000 UTC" firstStartedPulling="2025-12-13 06:47:58.456203274 +0000 UTC m=+1112.322797865" lastFinishedPulling="2025-12-13 06:48:05.679675431 +0000 UTC m=+1119.546270022" observedRunningTime="2025-12-13 06:48:07.047736368 +0000 UTC m=+1120.914330969" watchObservedRunningTime="2025-12-13 06:48:07.053393591 +0000 UTC m=+1120.919988172"
Dec 13 06:48:07 crc kubenswrapper[5048]: I1213 06:48:07.070857 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=18.405702048 podStartE2EDuration="36.070838654s" podCreationTimestamp="2025-12-13 06:47:31 +0000 UTC" firstStartedPulling="2025-12-13 06:47:40.964194967 +0000 UTC m=+1094.830789548" lastFinishedPulling="2025-12-13 06:47:58.629331573 +0000 UTC m=+1112.495926154" observedRunningTime="2025-12-13 06:48:07.069691103 +0000 UTC m=+1120.936285694" watchObservedRunningTime="2025-12-13 06:48:07.070838654 +0000 UTC m=+1120.937433235"
Dec 13 06:48:07 crc kubenswrapper[5048]: I1213 06:48:07.131088 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=34.50971293 podStartE2EDuration="37.131064829s" podCreationTimestamp="2025-12-13 06:47:30 +0000 UTC" firstStartedPulling="2025-12-13 06:47:56.01150982 +0000 UTC m=+1109.878104401" lastFinishedPulling="2025-12-13 06:47:58.632861719 +0000 UTC m=+1112.499456300" observedRunningTime="2025-12-13 06:48:07.11008684 +0000 UTC m=+1120.976681421" watchObservedRunningTime="2025-12-13 06:48:07.131064829 +0000 UTC m=+1120.997659410"
Dec 13 06:48:08 crc kubenswrapper[5048]: I1213 06:48:08.050056 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-x5dfv" event={"ID":"df1e9604-d8b9-4e08-8eb1-9c30b73f6d70","Type":"ContainerStarted","Data":"fba7a1ad9182be92250385a939a9f6603cb44fd00b3080ee1246433782f4de50"}
Dec 13 06:48:08 crc kubenswrapper[5048]: I1213 06:48:08.052811 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-67pvp" event={"ID":"856a2a68-3593-4af9-8285-2f80d731f123","Type":"ContainerStarted","Data":"0962d18b3458f3186a428be39d45f4ed94f92988127dcbf95ad539344c689e19"}
Dec 13 06:48:08 crc kubenswrapper[5048]: I1213 06:48:08.075344 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-666b6646f7-67pvp" podStartSLOduration=3.365337761 podStartE2EDuration="40.075327943s" podCreationTimestamp="2025-12-13 06:47:28 +0000 UTC" firstStartedPulling="2025-12-13 06:47:29.603953206 +0000 UTC m=+1083.470547787" lastFinishedPulling="2025-12-13 06:48:06.313943388 +0000 UTC m=+1120.180537969" observedRunningTime="2025-12-13 06:48:08.070811809 +0000 UTC m=+1121.937406391" watchObservedRunningTime="2025-12-13 06:48:08.075327943 +0000 UTC m=+1121.941922514"
Dec 13 06:48:09 crc kubenswrapper[5048]: I1213 06:48:09.062013 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-x5dfv" event={"ID":"df1e9604-d8b9-4e08-8eb1-9c30b73f6d70","Type":"ContainerStarted","Data":"0785fcad716b1a8dc30a920ccfbf80d2b52a404a15641fb716c0217ce349e850"}
Dec 13 06:48:09 crc kubenswrapper[5048]: I1213 06:48:09.062665 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-x5dfv"
Dec 13 06:48:09 crc kubenswrapper[5048]: I1213 06:48:09.066506 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-666b6646f7-67pvp"
Dec 13 06:48:10 crc kubenswrapper[5048]: I1213 06:48:10.071127 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"8c7d8f14-b731-4408-b506-2dac81b2a0a7","Type":"ContainerStarted","Data":"dd1cd063355cbacef37b2e1f929220dccb3d25f70af63b3cb7ab7b31e673cde5"}
Dec 13 06:48:10 crc kubenswrapper[5048]: I1213 06:48:10.074706 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"932081d6-9a70-45ec-8738-f0b1265a2a84","Type":"ContainerStarted","Data":"d47d35e87e6bbd2a19ea059979a74f5765c7fee03ddc5e862417b10377dcba18"}
Dec 13 06:48:10 crc kubenswrapper[5048]: I1213 06:48:10.074755 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-x5dfv"
Dec 13 06:48:10 crc kubenswrapper[5048]: I1213 06:48:10.092990 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-x5dfv" podStartSLOduration=26.571423656 podStartE2EDuration="32.092968864s" podCreationTimestamp="2025-12-13 06:47:38 +0000 UTC" firstStartedPulling="2025-12-13 06:48:00.07548964 +0000 UTC m=+1113.942084221" lastFinishedPulling="2025-12-13 06:48:05.597034848 +0000 UTC m=+1119.463629429" observedRunningTime="2025-12-13 06:48:09.085086143 +0000 UTC m=+1122.951680724" watchObservedRunningTime="2025-12-13 06:48:10.092968864 +0000 UTC m=+1123.959563445"
Dec 13 06:48:10 crc kubenswrapper[5048]: I1213 06:48:10.097476 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=22.387076418 podStartE2EDuration="32.097439095s" podCreationTimestamp="2025-12-13 06:47:38 +0000 UTC" firstStartedPulling="2025-12-13 06:48:00.056385521 +0000 UTC m=+1113.922980102" lastFinishedPulling="2025-12-13 06:48:09.766748188 +0000 UTC m=+1123.633342779" observedRunningTime="2025-12-13 06:48:10.088685217 +0000 UTC m=+1123.955279818" watchObservedRunningTime="2025-12-13 06:48:10.097439095 +0000 UTC m=+1123.964033676"
Dec 13 06:48:10 crc kubenswrapper[5048]: I1213 06:48:10.108989 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=18.427608312 podStartE2EDuration="29.108968407s" podCreationTimestamp="2025-12-13 06:47:41 +0000 UTC" firstStartedPulling="2025-12-13 06:47:59.093980966 +0000 UTC m=+1112.960575547" lastFinishedPulling="2025-12-13 06:48:09.775341051 +0000 UTC m=+1123.641935642" observedRunningTime="2025-12-13 06:48:10.107800776 +0000 UTC m=+1123.974395367" watchObservedRunningTime="2025-12-13 06:48:10.108968407 +0000 UTC m=+1123.975562988"
Dec 13 06:48:12 crc kubenswrapper[5048]: I1213 06:48:12.091099 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-fvvwg" event={"ID":"e70bef65-3e49-4e8c-8311-1c490a03fbe0","Type":"ContainerStarted","Data":"7bba1a3f4ced5a87b6478d79016f90b501dea92d78398496f46f9cac138df0e9"}
Dec 13 06:48:12 crc kubenswrapper[5048]: I1213 06:48:12.214544 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0"
Dec 13 06:48:12 crc kubenswrapper[5048]: I1213 06:48:12.214915 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0"
Dec 13 06:48:12 crc kubenswrapper[5048]: I1213 06:48:12.603821 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0"
Dec 13 06:48:12 crc kubenswrapper[5048]: I1213 06:48:12.604484 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0"
Dec 13 06:48:12 crc kubenswrapper[5048]: I1213 06:48:12.652403 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0"
Dec 13 06:48:12 crc kubenswrapper[5048]: I1213 06:48:12.705699 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0"
Dec 13 06:48:12 crc kubenswrapper[5048]: I1213 06:48:12.745426 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.099416 5048 generic.go:334] "Generic (PLEG): container finished" podID="e70bef65-3e49-4e8c-8311-1c490a03fbe0" containerID="7bba1a3f4ced5a87b6478d79016f90b501dea92d78398496f46f9cac138df0e9" exitCode=0
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.100297 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-fvvwg" event={"ID":"e70bef65-3e49-4e8c-8311-1c490a03fbe0","Type":"ContainerDied","Data":"7bba1a3f4ced5a87b6478d79016f90b501dea92d78398496f46f9cac138df0e9"}
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.100506 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.229199 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.240330 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.240384 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.240427 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.351190 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.426522 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-67pvp"]
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.427172 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-666b6646f7-67pvp" podUID="856a2a68-3593-4af9-8285-2f80d731f123" containerName="dnsmasq-dns" containerID="cri-o://0962d18b3458f3186a428be39d45f4ed94f92988127dcbf95ad539344c689e19" gracePeriod=10
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.429904 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-666b6646f7-67pvp"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.442591 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.477110 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-nrmr8"]
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.478709 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-nrmr8"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.480615 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.523749 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-nrmr8"]
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.597247 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-l4wtt"]
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.612944 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-l4wtt"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.614374 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0fe36828-9531-4104-bd23-fdc492007087-ovsdbserver-nb\") pod \"dnsmasq-dns-5bf47b49b7-nrmr8\" (UID: \"0fe36828-9531-4104-bd23-fdc492007087\") " pod="openstack/dnsmasq-dns-5bf47b49b7-nrmr8"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.614410 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0fe36828-9531-4104-bd23-fdc492007087-dns-svc\") pod \"dnsmasq-dns-5bf47b49b7-nrmr8\" (UID: \"0fe36828-9531-4104-bd23-fdc492007087\") " pod="openstack/dnsmasq-dns-5bf47b49b7-nrmr8"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.614537 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0fe36828-9531-4104-bd23-fdc492007087-config\") pod \"dnsmasq-dns-5bf47b49b7-nrmr8\" (UID: \"0fe36828-9531-4104-bd23-fdc492007087\") " pod="openstack/dnsmasq-dns-5bf47b49b7-nrmr8"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.614567 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-49flj\" (UniqueName: \"kubernetes.io/projected/0fe36828-9531-4104-bd23-fdc492007087-kube-api-access-49flj\") pod \"dnsmasq-dns-5bf47b49b7-nrmr8\" (UID: \"0fe36828-9531-4104-bd23-fdc492007087\") " pod="openstack/dnsmasq-dns-5bf47b49b7-nrmr8"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.616926 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.625347 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-l4wtt"]
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.712077 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-fvvwg"]
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.716725 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/0da0172c-5a76-44fc-8ff1-e694ba1e083b-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-l4wtt\" (UID: \"0da0172c-5a76-44fc-8ff1-e694ba1e083b\") " pod="openstack/ovn-controller-metrics-l4wtt"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.716832 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0da0172c-5a76-44fc-8ff1-e694ba1e083b-config\") pod \"ovn-controller-metrics-l4wtt\" (UID: \"0da0172c-5a76-44fc-8ff1-e694ba1e083b\") " pod="openstack/ovn-controller-metrics-l4wtt"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.716885 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0da0172c-5a76-44fc-8ff1-e694ba1e083b-combined-ca-bundle\") pod \"ovn-controller-metrics-l4wtt\" (UID: \"0da0172c-5a76-44fc-8ff1-e694ba1e083b\") " pod="openstack/ovn-controller-metrics-l4wtt"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.716925 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rx2kl\" (UniqueName: \"kubernetes.io/projected/0da0172c-5a76-44fc-8ff1-e694ba1e083b-kube-api-access-rx2kl\") pod \"ovn-controller-metrics-l4wtt\" (UID: \"0da0172c-5a76-44fc-8ff1-e694ba1e083b\") " pod="openstack/ovn-controller-metrics-l4wtt"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.717009 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0fe36828-9531-4104-bd23-fdc492007087-config\") pod \"dnsmasq-dns-5bf47b49b7-nrmr8\" (UID: \"0fe36828-9531-4104-bd23-fdc492007087\") " pod="openstack/dnsmasq-dns-5bf47b49b7-nrmr8"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.717037 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-49flj\" (UniqueName: \"kubernetes.io/projected/0fe36828-9531-4104-bd23-fdc492007087-kube-api-access-49flj\") pod \"dnsmasq-dns-5bf47b49b7-nrmr8\" (UID: \"0fe36828-9531-4104-bd23-fdc492007087\") " pod="openstack/dnsmasq-dns-5bf47b49b7-nrmr8"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.717063 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/0da0172c-5a76-44fc-8ff1-e694ba1e083b-ovn-rundir\") pod \"ovn-controller-metrics-l4wtt\" (UID: \"0da0172c-5a76-44fc-8ff1-e694ba1e083b\") " pod="openstack/ovn-controller-metrics-l4wtt"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.717105 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/0da0172c-5a76-44fc-8ff1-e694ba1e083b-ovs-rundir\") pod \"ovn-controller-metrics-l4wtt\" (UID: \"0da0172c-5a76-44fc-8ff1-e694ba1e083b\") " pod="openstack/ovn-controller-metrics-l4wtt"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.717174 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0fe36828-9531-4104-bd23-fdc492007087-ovsdbserver-nb\") pod \"dnsmasq-dns-5bf47b49b7-nrmr8\" (UID: \"0fe36828-9531-4104-bd23-fdc492007087\") " pod="openstack/dnsmasq-dns-5bf47b49b7-nrmr8"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.717247 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0fe36828-9531-4104-bd23-fdc492007087-dns-svc\") pod \"dnsmasq-dns-5bf47b49b7-nrmr8\" (UID: \"0fe36828-9531-4104-bd23-fdc492007087\") " pod="openstack/dnsmasq-dns-5bf47b49b7-nrmr8"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.718348 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0fe36828-9531-4104-bd23-fdc492007087-config\") pod \"dnsmasq-dns-5bf47b49b7-nrmr8\" (UID: \"0fe36828-9531-4104-bd23-fdc492007087\") " pod="openstack/dnsmasq-dns-5bf47b49b7-nrmr8"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.722880 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0fe36828-9531-4104-bd23-fdc492007087-ovsdbserver-nb\") pod \"dnsmasq-dns-5bf47b49b7-nrmr8\" (UID: \"0fe36828-9531-4104-bd23-fdc492007087\") " pod="openstack/dnsmasq-dns-5bf47b49b7-nrmr8"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.727785 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0fe36828-9531-4104-bd23-fdc492007087-dns-svc\") pod \"dnsmasq-dns-5bf47b49b7-nrmr8\" (UID: \"0fe36828-9531-4104-bd23-fdc492007087\") " pod="openstack/dnsmasq-dns-5bf47b49b7-nrmr8"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.755465 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-49flj\" (UniqueName: \"kubernetes.io/projected/0fe36828-9531-4104-bd23-fdc492007087-kube-api-access-49flj\") pod \"dnsmasq-dns-5bf47b49b7-nrmr8\" (UID: \"0fe36828-9531-4104-bd23-fdc492007087\") " pod="openstack/dnsmasq-dns-5bf47b49b7-nrmr8"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.795103 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8554648995-dwpg4"]
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.796964 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-dwpg4"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.806670 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.809079 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-dwpg4"]
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.818355 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/0da0172c-5a76-44fc-8ff1-e694ba1e083b-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-l4wtt\" (UID: \"0da0172c-5a76-44fc-8ff1-e694ba1e083b\") " pod="openstack/ovn-controller-metrics-l4wtt"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.819684 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0da0172c-5a76-44fc-8ff1-e694ba1e083b-config\") pod \"ovn-controller-metrics-l4wtt\" (UID: \"0da0172c-5a76-44fc-8ff1-e694ba1e083b\") " pod="openstack/ovn-controller-metrics-l4wtt"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.821117 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0da0172c-5a76-44fc-8ff1-e694ba1e083b-combined-ca-bundle\") pod \"ovn-controller-metrics-l4wtt\" (UID: \"0da0172c-5a76-44fc-8ff1-e694ba1e083b\") " pod="openstack/ovn-controller-metrics-l4wtt"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.821244 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rx2kl\" (UniqueName: \"kubernetes.io/projected/0da0172c-5a76-44fc-8ff1-e694ba1e083b-kube-api-access-rx2kl\") pod \"ovn-controller-metrics-l4wtt\" (UID: \"0da0172c-5a76-44fc-8ff1-e694ba1e083b\") " pod="openstack/ovn-controller-metrics-l4wtt"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.821406 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/0da0172c-5a76-44fc-8ff1-e694ba1e083b-ovn-rundir\") pod \"ovn-controller-metrics-l4wtt\" (UID: \"0da0172c-5a76-44fc-8ff1-e694ba1e083b\") " pod="openstack/ovn-controller-metrics-l4wtt"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.821546 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/0da0172c-5a76-44fc-8ff1-e694ba1e083b-ovs-rundir\") pod \"ovn-controller-metrics-l4wtt\" (UID: \"0da0172c-5a76-44fc-8ff1-e694ba1e083b\") " pod="openstack/ovn-controller-metrics-l4wtt"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.822020 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/0da0172c-5a76-44fc-8ff1-e694ba1e083b-ovs-rundir\") pod \"ovn-controller-metrics-l4wtt\" (UID: \"0da0172c-5a76-44fc-8ff1-e694ba1e083b\") " pod="openstack/ovn-controller-metrics-l4wtt"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.821059 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0da0172c-5a76-44fc-8ff1-e694ba1e083b-config\") pod \"ovn-controller-metrics-l4wtt\" (UID: \"0da0172c-5a76-44fc-8ff1-e694ba1e083b\") " pod="openstack/ovn-controller-metrics-l4wtt"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.825536 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"]
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.825653 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/0da0172c-5a76-44fc-8ff1-e694ba1e083b-ovn-rundir\") pod \"ovn-controller-metrics-l4wtt\" (UID: \"0da0172c-5a76-44fc-8ff1-e694ba1e083b\") " pod="openstack/ovn-controller-metrics-l4wtt"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.828231 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.828242 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0da0172c-5a76-44fc-8ff1-e694ba1e083b-combined-ca-bundle\") pod \"ovn-controller-metrics-l4wtt\" (UID: \"0da0172c-5a76-44fc-8ff1-e694ba1e083b\") " pod="openstack/ovn-controller-metrics-l4wtt"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.838882 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"]
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.849987 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/0da0172c-5a76-44fc-8ff1-e694ba1e083b-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-l4wtt\" (UID: \"0da0172c-5a76-44fc-8ff1-e694ba1e083b\") " pod="openstack/ovn-controller-metrics-l4wtt"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.850521 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.850547 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.850692 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.851211 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-nfgdh"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.878095 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rx2kl\" (UniqueName: \"kubernetes.io/projected/0da0172c-5a76-44fc-8ff1-e694ba1e083b-kube-api-access-rx2kl\") pod \"ovn-controller-metrics-l4wtt\" (UID: \"0da0172c-5a76-44fc-8ff1-e694ba1e083b\") " pod="openstack/ovn-controller-metrics-l4wtt"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.903762 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-nrmr8"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.922817 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rpzt6\" (UniqueName: \"kubernetes.io/projected/4e4bfc5f-4bb9-4cd0-8686-854d60a380a4-kube-api-access-rpzt6\") pod \"dnsmasq-dns-8554648995-dwpg4\" (UID: \"4e4bfc5f-4bb9-4cd0-8686-854d60a380a4\") " pod="openstack/dnsmasq-dns-8554648995-dwpg4"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.922891 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e4bfc5f-4bb9-4cd0-8686-854d60a380a4-config\") pod \"dnsmasq-dns-8554648995-dwpg4\" (UID: \"4e4bfc5f-4bb9-4cd0-8686-854d60a380a4\") " pod="openstack/dnsmasq-dns-8554648995-dwpg4"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.922937 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4e4bfc5f-4bb9-4cd0-8686-854d60a380a4-dns-svc\") pod \"dnsmasq-dns-8554648995-dwpg4\" (UID: \"4e4bfc5f-4bb9-4cd0-8686-854d60a380a4\") " pod="openstack/dnsmasq-dns-8554648995-dwpg4"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.922975 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4e4bfc5f-4bb9-4cd0-8686-854d60a380a4-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-dwpg4\" (UID: \"4e4bfc5f-4bb9-4cd0-8686-854d60a380a4\") " pod="openstack/dnsmasq-dns-8554648995-dwpg4"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.923000 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4e4bfc5f-4bb9-4cd0-8686-854d60a380a4-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-dwpg4\" (UID: \"4e4bfc5f-4bb9-4cd0-8686-854d60a380a4\") " pod="openstack/dnsmasq-dns-8554648995-dwpg4"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.937985 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-l4wtt"
Dec 13 06:48:13 crc kubenswrapper[5048]: I1213 06:48:13.990182 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-67pvp"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.024819 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4e4bfc5f-4bb9-4cd0-8686-854d60a380a4-dns-svc\") pod \"dnsmasq-dns-8554648995-dwpg4\" (UID: \"4e4bfc5f-4bb9-4cd0-8686-854d60a380a4\") " pod="openstack/dnsmasq-dns-8554648995-dwpg4"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.025152 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f002b19-41eb-4af1-a6c1-a4639e81417e-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"3f002b19-41eb-4af1-a6c1-a4639e81417e\") " pod="openstack/ovn-northd-0"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.025174 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4e4bfc5f-4bb9-4cd0-8686-854d60a380a4-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-dwpg4\" (UID: \"4e4bfc5f-4bb9-4cd0-8686-854d60a380a4\") " pod="openstack/dnsmasq-dns-8554648995-dwpg4"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.025197 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4e4bfc5f-4bb9-4cd0-8686-854d60a380a4-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-dwpg4\" (UID: \"4e4bfc5f-4bb9-4cd0-8686-854d60a380a4\") " pod="openstack/dnsmasq-dns-8554648995-dwpg4"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.025242 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3f002b19-41eb-4af1-a6c1-a4639e81417e-scripts\") pod \"ovn-northd-0\" (UID: \"3f002b19-41eb-4af1-a6c1-a4639e81417e\") " pod="openstack/ovn-northd-0"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.025267 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f002b19-41eb-4af1-a6c1-a4639e81417e-config\") pod \"ovn-northd-0\" (UID: \"3f002b19-41eb-4af1-a6c1-a4639e81417e\") " pod="openstack/ovn-northd-0"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.025308 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f002b19-41eb-4af1-a6c1-a4639e81417e-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"3f002b19-41eb-4af1-a6c1-a4639e81417e\") " pod="openstack/ovn-northd-0"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.025339 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rpzt6\" (UniqueName: \"kubernetes.io/projected/4e4bfc5f-4bb9-4cd0-8686-854d60a380a4-kube-api-access-rpzt6\") pod \"dnsmasq-dns-8554648995-dwpg4\" (UID: \"4e4bfc5f-4bb9-4cd0-8686-854d60a380a4\") " pod="openstack/dnsmasq-dns-8554648995-dwpg4"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.025365 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f002b19-41eb-4af1-a6c1-a4639e81417e-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"3f002b19-41eb-4af1-a6c1-a4639e81417e\") " pod="openstack/ovn-northd-0"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.025384 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c4h6s\" (UniqueName: \"kubernetes.io/projected/3f002b19-41eb-4af1-a6c1-a4639e81417e-kube-api-access-c4h6s\") pod \"ovn-northd-0\" (UID: \"3f002b19-41eb-4af1-a6c1-a4639e81417e\") " pod="openstack/ovn-northd-0"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.025406 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e4bfc5f-4bb9-4cd0-8686-854d60a380a4-config\") pod \"dnsmasq-dns-8554648995-dwpg4\" (UID: \"4e4bfc5f-4bb9-4cd0-8686-854d60a380a4\") " pod="openstack/dnsmasq-dns-8554648995-dwpg4"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.025469 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/3f002b19-41eb-4af1-a6c1-a4639e81417e-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"3f002b19-41eb-4af1-a6c1-a4639e81417e\") " pod="openstack/ovn-northd-0"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.026494 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e4bfc5f-4bb9-4cd0-8686-854d60a380a4-config\") pod \"dnsmasq-dns-8554648995-dwpg4\" (UID: \"4e4bfc5f-4bb9-4cd0-8686-854d60a380a4\") " pod="openstack/dnsmasq-dns-8554648995-dwpg4"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.026601 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4e4bfc5f-4bb9-4cd0-8686-854d60a380a4-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-dwpg4\" (UID: \"4e4bfc5f-4bb9-4cd0-8686-854d60a380a4\") " pod="openstack/dnsmasq-dns-8554648995-dwpg4"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.026809 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4e4bfc5f-4bb9-4cd0-8686-854d60a380a4-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-dwpg4\" (UID: \"4e4bfc5f-4bb9-4cd0-8686-854d60a380a4\") " pod="openstack/dnsmasq-dns-8554648995-dwpg4"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.027005 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4e4bfc5f-4bb9-4cd0-8686-854d60a380a4-dns-svc\") pod \"dnsmasq-dns-8554648995-dwpg4\" (UID: \"4e4bfc5f-4bb9-4cd0-8686-854d60a380a4\") " pod="openstack/dnsmasq-dns-8554648995-dwpg4"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.050740 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rpzt6\" (UniqueName: \"kubernetes.io/projected/4e4bfc5f-4bb9-4cd0-8686-854d60a380a4-kube-api-access-rpzt6\") pod \"dnsmasq-dns-8554648995-dwpg4\" (UID: \"4e4bfc5f-4bb9-4cd0-8686-854d60a380a4\") " pod="openstack/dnsmasq-dns-8554648995-dwpg4"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.126499 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-fvvwg" event={"ID":"e70bef65-3e49-4e8c-8311-1c490a03fbe0","Type":"ContainerStarted","Data":"2fcdbbf57702d191125b922a5d46f0306170c719650d88372f50f40f6aed9c25"}
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.126656 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57d769cc4f-fvvwg" podUID="e70bef65-3e49-4e8c-8311-1c490a03fbe0" containerName="dnsmasq-dns" containerID="cri-o://2fcdbbf57702d191125b922a5d46f0306170c719650d88372f50f40f6aed9c25" gracePeriod=10
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.126965 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57d769cc4f-fvvwg"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.127763 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-dwpg4"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.127814 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/856a2a68-3593-4af9-8285-2f80d731f123-config\") pod \"856a2a68-3593-4af9-8285-2f80d731f123\" (UID: \"856a2a68-3593-4af9-8285-2f80d731f123\") "
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.127923 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gpj7x\" (UniqueName: \"kubernetes.io/projected/856a2a68-3593-4af9-8285-2f80d731f123-kube-api-access-gpj7x\") pod \"856a2a68-3593-4af9-8285-2f80d731f123\" (UID: \"856a2a68-3593-4af9-8285-2f80d731f123\") "
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.127965 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/856a2a68-3593-4af9-8285-2f80d731f123-dns-svc\") pod \"856a2a68-3593-4af9-8285-2f80d731f123\" (UID: \"856a2a68-3593-4af9-8285-2f80d731f123\") "
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.128268 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f002b19-41eb-4af1-a6c1-a4639e81417e-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"3f002b19-41eb-4af1-a6c1-a4639e81417e\") " pod="openstack/ovn-northd-0"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.128293 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c4h6s\" (UniqueName: \"kubernetes.io/projected/3f002b19-41eb-4af1-a6c1-a4639e81417e-kube-api-access-c4h6s\") pod \"ovn-northd-0\" (UID: \"3f002b19-41eb-4af1-a6c1-a4639e81417e\") " pod="openstack/ovn-northd-0"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.128323 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/3f002b19-41eb-4af1-a6c1-a4639e81417e-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"3f002b19-41eb-4af1-a6c1-a4639e81417e\") " pod="openstack/ovn-northd-0"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.128357 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f002b19-41eb-4af1-a6c1-a4639e81417e-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"3f002b19-41eb-4af1-a6c1-a4639e81417e\") " pod="openstack/ovn-northd-0"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.128397 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3f002b19-41eb-4af1-a6c1-a4639e81417e-scripts\") pod \"ovn-northd-0\" (UID: \"3f002b19-41eb-4af1-a6c1-a4639e81417e\") " pod="openstack/ovn-northd-0"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.128416 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f002b19-41eb-4af1-a6c1-a4639e81417e-config\") pod \"ovn-northd-0\" (UID: \"3f002b19-41eb-4af1-a6c1-a4639e81417e\") " pod="openstack/ovn-northd-0"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.128479 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f002b19-41eb-4af1-a6c1-a4639e81417e-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"3f002b19-41eb-4af1-a6c1-a4639e81417e\") " pod="openstack/ovn-northd-0"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.133099 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/3f002b19-41eb-4af1-a6c1-a4639e81417e-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"3f002b19-41eb-4af1-a6c1-a4639e81417e\") " pod="openstack/ovn-northd-0"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.133677 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3f002b19-41eb-4af1-a6c1-a4639e81417e-scripts\") pod \"ovn-northd-0\" (UID: \"3f002b19-41eb-4af1-a6c1-a4639e81417e\") " pod="openstack/ovn-northd-0"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.134278 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f002b19-41eb-4af1-a6c1-a4639e81417e-config\") pod \"ovn-northd-0\" (UID: \"3f002b19-41eb-4af1-a6c1-a4639e81417e\") " pod="openstack/ovn-northd-0"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.136958 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f002b19-41eb-4af1-a6c1-a4639e81417e-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"3f002b19-41eb-4af1-a6c1-a4639e81417e\") " pod="openstack/ovn-northd-0"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.138000 5048 generic.go:334] "Generic (PLEG): container finished" podID="856a2a68-3593-4af9-8285-2f80d731f123" containerID="0962d18b3458f3186a428be39d45f4ed94f92988127dcbf95ad539344c689e19" exitCode=0
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.138069 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-67pvp" event={"ID":"856a2a68-3593-4af9-8285-2f80d731f123","Type":"ContainerDied","Data":"0962d18b3458f3186a428be39d45f4ed94f92988127dcbf95ad539344c689e19"}
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.138103 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-67pvp" event={"ID":"856a2a68-3593-4af9-8285-2f80d731f123","Type":"ContainerDied","Data":"eb4ef89afc7a1dd2965f3e4b4b35dabc9bd2b7d33919bf62a13dafb110546634"}
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.138125 5048 scope.go:117] "RemoveContainer" containerID="0962d18b3458f3186a428be39d45f4ed94f92988127dcbf95ad539344c689e19"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.138294 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-67pvp"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.139186 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f002b19-41eb-4af1-a6c1-a4639e81417e-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"3f002b19-41eb-4af1-a6c1-a4639e81417e\") " pod="openstack/ovn-northd-0"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.141512 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/856a2a68-3593-4af9-8285-2f80d731f123-kube-api-access-gpj7x" (OuterVolumeSpecName: "kube-api-access-gpj7x") pod "856a2a68-3593-4af9-8285-2f80d731f123" (UID: "856a2a68-3593-4af9-8285-2f80d731f123"). InnerVolumeSpecName "kube-api-access-gpj7x". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.144790 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f002b19-41eb-4af1-a6c1-a4639e81417e-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"3f002b19-41eb-4af1-a6c1-a4639e81417e\") " pod="openstack/ovn-northd-0"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.152332 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c4h6s\" (UniqueName: \"kubernetes.io/projected/3f002b19-41eb-4af1-a6c1-a4639e81417e-kube-api-access-c4h6s\") pod \"ovn-northd-0\" (UID: \"3f002b19-41eb-4af1-a6c1-a4639e81417e\") " pod="openstack/ovn-northd-0"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.156347 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57d769cc4f-fvvwg" podStartSLOduration=-9223371991.698446 podStartE2EDuration="45.156330027s" podCreationTimestamp="2025-12-13 06:47:29 +0000 UTC" firstStartedPulling="2025-12-13 06:47:30.017693863 +0000 UTC m=+1083.884288444" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:48:14.148487304 +0000 UTC m=+1128.015081895" watchObservedRunningTime="2025-12-13 06:48:14.156330027 +0000 UTC m=+1128.022924608"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.175198 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/856a2a68-3593-4af9-8285-2f80d731f123-config" (OuterVolumeSpecName: "config") pod "856a2a68-3593-4af9-8285-2f80d731f123" (UID: "856a2a68-3593-4af9-8285-2f80d731f123"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.177389 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/856a2a68-3593-4af9-8285-2f80d731f123-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "856a2a68-3593-4af9-8285-2f80d731f123" (UID: "856a2a68-3593-4af9-8285-2f80d731f123"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.182269 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"e3a0bce1-8848-4ac7-a030-19640b952708","Type":"ContainerStarted","Data":"75ee9b498965105594517eef849b5ed0c40d73c2d2599da5a3eae4643255329f"}
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.187074 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"a7bbc535-10f7-44cc-89a6-cbb697149e4a","Type":"ContainerStarted","Data":"7a1fa96f3b72ad7f4e5857f6a5eb077b15e2b09603a5ab2399dffe44b8b8c7fd"}
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.216950 5048 scope.go:117] "RemoveContainer" containerID="3cf9e3657056b6f4f854217eec5225319ee6ecd3981719478c9f928dff936e2a"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.229960 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/856a2a68-3593-4af9-8285-2f80d731f123-config\") on node \"crc\" DevicePath \"\""
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.230003 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gpj7x\" (UniqueName: \"kubernetes.io/projected/856a2a68-3593-4af9-8285-2f80d731f123-kube-api-access-gpj7x\") on node \"crc\" DevicePath \"\""
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.230077 5048 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/856a2a68-3593-4af9-8285-2f80d731f123-dns-svc\") on node \"crc\" DevicePath \"\""
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.243757 5048 scope.go:117] "RemoveContainer" containerID="0962d18b3458f3186a428be39d45f4ed94f92988127dcbf95ad539344c689e19"
Dec 13 06:48:14 crc kubenswrapper[5048]: E1213 06:48:14.246627 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0962d18b3458f3186a428be39d45f4ed94f92988127dcbf95ad539344c689e19\": container with ID starting with 0962d18b3458f3186a428be39d45f4ed94f92988127dcbf95ad539344c689e19 not found: ID does not exist" containerID="0962d18b3458f3186a428be39d45f4ed94f92988127dcbf95ad539344c689e19"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.246683 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0962d18b3458f3186a428be39d45f4ed94f92988127dcbf95ad539344c689e19"} err="failed to get container status \"0962d18b3458f3186a428be39d45f4ed94f92988127dcbf95ad539344c689e19\": rpc error: code = NotFound desc = could not find container \"0962d18b3458f3186a428be39d45f4ed94f92988127dcbf95ad539344c689e19\": container with ID starting with 0962d18b3458f3186a428be39d45f4ed94f92988127dcbf95ad539344c689e19 not found: ID does not exist"
Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.246719 5048 scope.go:117] "RemoveContainer" containerID="3cf9e3657056b6f4f854217eec5225319ee6ecd3981719478c9f928dff936e2a"
Dec 13 06:48:14 crc kubenswrapper[5048]: E1213 06:48:14.247034 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3cf9e3657056b6f4f854217eec5225319ee6ecd3981719478c9f928dff936e2a\": container with ID starting with 3cf9e3657056b6f4f854217eec5225319ee6ecd3981719478c9f928dff936e2a not found: ID does not exist" containerID="3cf9e3657056b6f4f854217eec5225319ee6ecd3981719478c9f928dff936e2a"
Dec 13 06:48:14 crc
kubenswrapper[5048]: I1213 06:48:14.247064 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3cf9e3657056b6f4f854217eec5225319ee6ecd3981719478c9f928dff936e2a"} err="failed to get container status \"3cf9e3657056b6f4f854217eec5225319ee6ecd3981719478c9f928dff936e2a\": rpc error: code = NotFound desc = could not find container \"3cf9e3657056b6f4f854217eec5225319ee6ecd3981719478c9f928dff936e2a\": container with ID starting with 3cf9e3657056b6f4f854217eec5225319ee6ecd3981719478c9f928dff936e2a not found: ID does not exist" Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.278078 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.311424 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.415861 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.471821 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-nrmr8"] Dec 13 06:48:14 crc kubenswrapper[5048]: W1213 06:48:14.473882 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0fe36828_9531_4104_bd23_fdc492007087.slice/crio-0b0f6ee46894b0f3111039585b853e0cdcc3fd3d9ee8998d0266d5029246856e WatchSource:0}: Error finding container 0b0f6ee46894b0f3111039585b853e0cdcc3fd3d9ee8998d0266d5029246856e: Status 404 returned error can't find the container with id 0b0f6ee46894b0f3111039585b853e0cdcc3fd3d9ee8998d0266d5029246856e Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.539556 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-67pvp"] Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.551131 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-67pvp"] Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.594292 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="856a2a68-3593-4af9-8285-2f80d731f123" path="/var/lib/kubelet/pods/856a2a68-3593-4af9-8285-2f80d731f123/volumes" Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.594923 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-l4wtt"] Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.595000 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.637235 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-dwpg4"] Dec 13 06:48:14 crc kubenswrapper[5048]: I1213 06:48:14.938824 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Dec 13 06:48:14 crc kubenswrapper[5048]: W1213 06:48:14.945713 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3f002b19_41eb_4af1_a6c1_a4639e81417e.slice/crio-be7739ec5cb225d3d4d43dc4792424e6b47c3018fa0329fc58104c2de9afb789 WatchSource:0}: Error finding container be7739ec5cb225d3d4d43dc4792424e6b47c3018fa0329fc58104c2de9afb789: Status 404 returned error can't find the container with id 
be7739ec5cb225d3d4d43dc4792424e6b47c3018fa0329fc58104c2de9afb789 Dec 13 06:48:15 crc kubenswrapper[5048]: I1213 06:48:15.194400 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-dwpg4" event={"ID":"4e4bfc5f-4bb9-4cd0-8686-854d60a380a4","Type":"ContainerStarted","Data":"0bfca8647b79d07ebb81578a9afe71a866ca78f61eccf72a93c66e66c1c0b594"} Dec 13 06:48:15 crc kubenswrapper[5048]: I1213 06:48:15.195347 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"3f002b19-41eb-4af1-a6c1-a4639e81417e","Type":"ContainerStarted","Data":"be7739ec5cb225d3d4d43dc4792424e6b47c3018fa0329fc58104c2de9afb789"} Dec 13 06:48:15 crc kubenswrapper[5048]: I1213 06:48:15.196503 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-l4wtt" event={"ID":"0da0172c-5a76-44fc-8ff1-e694ba1e083b","Type":"ContainerStarted","Data":"0990e9b60ca6f48c3f169da6fe4eed2b4d64a5124c36e8d9dced70c14dff37e6"} Dec 13 06:48:15 crc kubenswrapper[5048]: I1213 06:48:15.200732 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-nrmr8" event={"ID":"0fe36828-9531-4104-bd23-fdc492007087","Type":"ContainerStarted","Data":"0b0f6ee46894b0f3111039585b853e0cdcc3fd3d9ee8998d0266d5029246856e"} Dec 13 06:48:15 crc kubenswrapper[5048]: I1213 06:48:15.202258 5048 generic.go:334] "Generic (PLEG): container finished" podID="e70bef65-3e49-4e8c-8311-1c490a03fbe0" containerID="2fcdbbf57702d191125b922a5d46f0306170c719650d88372f50f40f6aed9c25" exitCode=0 Dec 13 06:48:15 crc kubenswrapper[5048]: I1213 06:48:15.202387 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-fvvwg" event={"ID":"e70bef65-3e49-4e8c-8311-1c490a03fbe0","Type":"ContainerDied","Data":"2fcdbbf57702d191125b922a5d46f0306170c719650d88372f50f40f6aed9c25"} Dec 13 06:48:15 crc kubenswrapper[5048]: I1213 06:48:15.562575 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Dec 13 06:48:15 crc kubenswrapper[5048]: I1213 06:48:15.644885 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-nrmr8"] Dec 13 06:48:15 crc kubenswrapper[5048]: I1213 06:48:15.733154 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-j7782"] Dec 13 06:48:15 crc kubenswrapper[5048]: E1213 06:48:15.733585 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="856a2a68-3593-4af9-8285-2f80d731f123" containerName="dnsmasq-dns" Dec 13 06:48:15 crc kubenswrapper[5048]: I1213 06:48:15.733602 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="856a2a68-3593-4af9-8285-2f80d731f123" containerName="dnsmasq-dns" Dec 13 06:48:15 crc kubenswrapper[5048]: E1213 06:48:15.733627 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="856a2a68-3593-4af9-8285-2f80d731f123" containerName="init" Dec 13 06:48:15 crc kubenswrapper[5048]: I1213 06:48:15.733634 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="856a2a68-3593-4af9-8285-2f80d731f123" containerName="init" Dec 13 06:48:15 crc kubenswrapper[5048]: I1213 06:48:15.733839 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="856a2a68-3593-4af9-8285-2f80d731f123" containerName="dnsmasq-dns" Dec 13 06:48:15 crc kubenswrapper[5048]: I1213 06:48:15.744956 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-j7782" Dec 13 06:48:15 crc kubenswrapper[5048]: I1213 06:48:15.880813 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-j7782"] Dec 13 06:48:15 crc kubenswrapper[5048]: I1213 06:48:15.888400 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/73c1e78c-8d3f-45d1-976e-284ae877919e-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-j7782\" (UID: \"73c1e78c-8d3f-45d1-976e-284ae877919e\") " pod="openstack/dnsmasq-dns-b8fbc5445-j7782" Dec 13 06:48:15 crc kubenswrapper[5048]: I1213 06:48:15.888476 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cb9tr\" (UniqueName: \"kubernetes.io/projected/73c1e78c-8d3f-45d1-976e-284ae877919e-kube-api-access-cb9tr\") pod \"dnsmasq-dns-b8fbc5445-j7782\" (UID: \"73c1e78c-8d3f-45d1-976e-284ae877919e\") " pod="openstack/dnsmasq-dns-b8fbc5445-j7782" Dec 13 06:48:15 crc kubenswrapper[5048]: I1213 06:48:15.888550 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/73c1e78c-8d3f-45d1-976e-284ae877919e-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-j7782\" (UID: \"73c1e78c-8d3f-45d1-976e-284ae877919e\") " pod="openstack/dnsmasq-dns-b8fbc5445-j7782" Dec 13 06:48:15 crc kubenswrapper[5048]: I1213 06:48:15.888584 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/73c1e78c-8d3f-45d1-976e-284ae877919e-config\") pod \"dnsmasq-dns-b8fbc5445-j7782\" (UID: \"73c1e78c-8d3f-45d1-976e-284ae877919e\") " pod="openstack/dnsmasq-dns-b8fbc5445-j7782" Dec 13 06:48:15 crc kubenswrapper[5048]: I1213 06:48:15.888630 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/73c1e78c-8d3f-45d1-976e-284ae877919e-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-j7782\" (UID: \"73c1e78c-8d3f-45d1-976e-284ae877919e\") " pod="openstack/dnsmasq-dns-b8fbc5445-j7782" Dec 13 06:48:15 crc kubenswrapper[5048]: I1213 06:48:15.989849 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/73c1e78c-8d3f-45d1-976e-284ae877919e-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-j7782\" (UID: \"73c1e78c-8d3f-45d1-976e-284ae877919e\") " pod="openstack/dnsmasq-dns-b8fbc5445-j7782" Dec 13 06:48:15 crc kubenswrapper[5048]: I1213 06:48:15.989922 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/73c1e78c-8d3f-45d1-976e-284ae877919e-config\") pod \"dnsmasq-dns-b8fbc5445-j7782\" (UID: \"73c1e78c-8d3f-45d1-976e-284ae877919e\") " pod="openstack/dnsmasq-dns-b8fbc5445-j7782" Dec 13 06:48:15 crc kubenswrapper[5048]: I1213 06:48:15.989980 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/73c1e78c-8d3f-45d1-976e-284ae877919e-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-j7782\" (UID: \"73c1e78c-8d3f-45d1-976e-284ae877919e\") " pod="openstack/dnsmasq-dns-b8fbc5445-j7782" Dec 13 06:48:15 crc kubenswrapper[5048]: I1213 06:48:15.990058 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/73c1e78c-8d3f-45d1-976e-284ae877919e-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-j7782\" (UID: \"73c1e78c-8d3f-45d1-976e-284ae877919e\") " pod="openstack/dnsmasq-dns-b8fbc5445-j7782" Dec 13 06:48:15 crc kubenswrapper[5048]: I1213 06:48:15.990085 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cb9tr\" (UniqueName: \"kubernetes.io/projected/73c1e78c-8d3f-45d1-976e-284ae877919e-kube-api-access-cb9tr\") pod \"dnsmasq-dns-b8fbc5445-j7782\" (UID: \"73c1e78c-8d3f-45d1-976e-284ae877919e\") " pod="openstack/dnsmasq-dns-b8fbc5445-j7782" Dec 13 06:48:15 crc kubenswrapper[5048]: I1213 06:48:15.991066 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/73c1e78c-8d3f-45d1-976e-284ae877919e-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-j7782\" (UID: \"73c1e78c-8d3f-45d1-976e-284ae877919e\") " pod="openstack/dnsmasq-dns-b8fbc5445-j7782" Dec 13 06:48:15 crc kubenswrapper[5048]: I1213 06:48:15.991304 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/73c1e78c-8d3f-45d1-976e-284ae877919e-config\") pod \"dnsmasq-dns-b8fbc5445-j7782\" (UID: \"73c1e78c-8d3f-45d1-976e-284ae877919e\") " pod="openstack/dnsmasq-dns-b8fbc5445-j7782" Dec 13 06:48:15 crc kubenswrapper[5048]: I1213 06:48:15.991762 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/73c1e78c-8d3f-45d1-976e-284ae877919e-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-j7782\" (UID: \"73c1e78c-8d3f-45d1-976e-284ae877919e\") " pod="openstack/dnsmasq-dns-b8fbc5445-j7782" Dec 13 06:48:15 crc kubenswrapper[5048]: I1213 06:48:15.991837 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/73c1e78c-8d3f-45d1-976e-284ae877919e-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-j7782\" (UID: \"73c1e78c-8d3f-45d1-976e-284ae877919e\") " pod="openstack/dnsmasq-dns-b8fbc5445-j7782" Dec 13 06:48:16 crc kubenswrapper[5048]: I1213 06:48:16.889976 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Dec 13 06:48:16 crc kubenswrapper[5048]: I1213 06:48:16.898015 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cb9tr\" (UniqueName: \"kubernetes.io/projected/73c1e78c-8d3f-45d1-976e-284ae877919e-kube-api-access-cb9tr\") pod \"dnsmasq-dns-b8fbc5445-j7782\" (UID: \"73c1e78c-8d3f-45d1-976e-284ae877919e\") " pod="openstack/dnsmasq-dns-b8fbc5445-j7782" Dec 13 06:48:16 crc kubenswrapper[5048]: I1213 06:48:16.926045 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Dec 13 06:48:16 crc kubenswrapper[5048]: I1213 06:48:16.928039 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Dec 13 06:48:16 crc kubenswrapper[5048]: I1213 06:48:16.928282 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-tk569" Dec 13 06:48:16 crc kubenswrapper[5048]: I1213 06:48:16.928684 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Dec 13 06:48:16 crc kubenswrapper[5048]: I1213 06:48:16.928829 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Dec 13 06:48:16 crc kubenswrapper[5048]: I1213 06:48:16.977049 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.008855 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-j7782" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.070749 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-xs94r"] Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.071910 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-xs94r" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.074747 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.074933 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.075041 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.103009 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-xs94r"] Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.110508 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4-lock\") pod \"swift-storage-0\" (UID: \"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4\") " pod="openstack/swift-storage-0" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.110606 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4-cache\") pod \"swift-storage-0\" (UID: \"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4\") " pod="openstack/swift-storage-0" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.110702 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n7b4m\" (UniqueName: \"kubernetes.io/projected/b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4-kube-api-access-n7b4m\") pod \"swift-storage-0\" (UID: \"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4\") " pod="openstack/swift-storage-0" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.110748 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4-etc-swift\") pod \"swift-storage-0\" (UID: \"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4\") " 
pod="openstack/swift-storage-0" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.110787 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"swift-storage-0\" (UID: \"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4\") " pod="openstack/swift-storage-0" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.211832 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n7b4m\" (UniqueName: \"kubernetes.io/projected/b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4-kube-api-access-n7b4m\") pod \"swift-storage-0\" (UID: \"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4\") " pod="openstack/swift-storage-0" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.211903 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/76f71d51-d887-428e-bcf0-e07a75cda134-dispersionconf\") pod \"swift-ring-rebalance-xs94r\" (UID: \"76f71d51-d887-428e-bcf0-e07a75cda134\") " pod="openstack/swift-ring-rebalance-xs94r" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.211938 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/76f71d51-d887-428e-bcf0-e07a75cda134-scripts\") pod \"swift-ring-rebalance-xs94r\" (UID: \"76f71d51-d887-428e-bcf0-e07a75cda134\") " pod="openstack/swift-ring-rebalance-xs94r" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.211966 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4-etc-swift\") pod \"swift-storage-0\" (UID: \"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4\") " pod="openstack/swift-storage-0" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.211998 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"swift-storage-0\" (UID: \"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4\") " pod="openstack/swift-storage-0" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.212032 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76f71d51-d887-428e-bcf0-e07a75cda134-combined-ca-bundle\") pod \"swift-ring-rebalance-xs94r\" (UID: \"76f71d51-d887-428e-bcf0-e07a75cda134\") " pod="openstack/swift-ring-rebalance-xs94r" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.212062 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4-lock\") pod \"swift-storage-0\" (UID: \"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4\") " pod="openstack/swift-storage-0" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.212082 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/76f71d51-d887-428e-bcf0-e07a75cda134-swiftconf\") pod \"swift-ring-rebalance-xs94r\" (UID: \"76f71d51-d887-428e-bcf0-e07a75cda134\") " pod="openstack/swift-ring-rebalance-xs94r" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.212145 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/76f71d51-d887-428e-bcf0-e07a75cda134-ring-data-devices\") pod \"swift-ring-rebalance-xs94r\" (UID: \"76f71d51-d887-428e-bcf0-e07a75cda134\") " pod="openstack/swift-ring-rebalance-xs94r" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.212175 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4-cache\") pod \"swift-storage-0\" (UID: \"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4\") " pod="openstack/swift-storage-0" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.212200 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k8wnk\" (UniqueName: \"kubernetes.io/projected/76f71d51-d887-428e-bcf0-e07a75cda134-kube-api-access-k8wnk\") pod \"swift-ring-rebalance-xs94r\" (UID: \"76f71d51-d887-428e-bcf0-e07a75cda134\") " pod="openstack/swift-ring-rebalance-xs94r" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.212234 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/76f71d51-d887-428e-bcf0-e07a75cda134-etc-swift\") pod \"swift-ring-rebalance-xs94r\" (UID: \"76f71d51-d887-428e-bcf0-e07a75cda134\") " pod="openstack/swift-ring-rebalance-xs94r" Dec 13 06:48:17 crc kubenswrapper[5048]: E1213 06:48:17.212699 5048 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 13 06:48:17 crc kubenswrapper[5048]: E1213 06:48:17.212718 5048 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 13 06:48:17 crc kubenswrapper[5048]: E1213 06:48:17.212763 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4-etc-swift podName:b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4 nodeName:}" failed. No retries permitted until 2025-12-13 06:48:17.712743927 +0000 UTC m=+1131.579338508 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4-etc-swift") pod "swift-storage-0" (UID: "b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4") : configmap "swift-ring-files" not found Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.213272 5048 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"swift-storage-0\" (UID: \"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/swift-storage-0" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.216908 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4-lock\") pod \"swift-storage-0\" (UID: \"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4\") " pod="openstack/swift-storage-0" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.217393 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4-cache\") pod \"swift-storage-0\" (UID: \"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4\") " pod="openstack/swift-storage-0" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.241036 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"swift-storage-0\" (UID: \"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4\") " pod="openstack/swift-storage-0" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.247625 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n7b4m\" (UniqueName: \"kubernetes.io/projected/b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4-kube-api-access-n7b4m\") pod \"swift-storage-0\" (UID: \"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4\") " pod="openstack/swift-storage-0" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.313302 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/76f71d51-d887-428e-bcf0-e07a75cda134-dispersionconf\") pod \"swift-ring-rebalance-xs94r\" (UID: \"76f71d51-d887-428e-bcf0-e07a75cda134\") " pod="openstack/swift-ring-rebalance-xs94r" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.313357 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/76f71d51-d887-428e-bcf0-e07a75cda134-scripts\") pod \"swift-ring-rebalance-xs94r\" (UID: \"76f71d51-d887-428e-bcf0-e07a75cda134\") " pod="openstack/swift-ring-rebalance-xs94r" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.313420 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76f71d51-d887-428e-bcf0-e07a75cda134-combined-ca-bundle\") pod \"swift-ring-rebalance-xs94r\" (UID: \"76f71d51-d887-428e-bcf0-e07a75cda134\") " pod="openstack/swift-ring-rebalance-xs94r" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.313472 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/76f71d51-d887-428e-bcf0-e07a75cda134-swiftconf\") pod \"swift-ring-rebalance-xs94r\" (UID: \"76f71d51-d887-428e-bcf0-e07a75cda134\") " pod="openstack/swift-ring-rebalance-xs94r" Dec 13 06:48:17 crc 
kubenswrapper[5048]: I1213 06:48:17.313520 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/76f71d51-d887-428e-bcf0-e07a75cda134-ring-data-devices\") pod \"swift-ring-rebalance-xs94r\" (UID: \"76f71d51-d887-428e-bcf0-e07a75cda134\") " pod="openstack/swift-ring-rebalance-xs94r" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.313544 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k8wnk\" (UniqueName: \"kubernetes.io/projected/76f71d51-d887-428e-bcf0-e07a75cda134-kube-api-access-k8wnk\") pod \"swift-ring-rebalance-xs94r\" (UID: \"76f71d51-d887-428e-bcf0-e07a75cda134\") " pod="openstack/swift-ring-rebalance-xs94r" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.313569 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/76f71d51-d887-428e-bcf0-e07a75cda134-etc-swift\") pod \"swift-ring-rebalance-xs94r\" (UID: \"76f71d51-d887-428e-bcf0-e07a75cda134\") " pod="openstack/swift-ring-rebalance-xs94r" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.316035 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/76f71d51-d887-428e-bcf0-e07a75cda134-etc-swift\") pod \"swift-ring-rebalance-xs94r\" (UID: \"76f71d51-d887-428e-bcf0-e07a75cda134\") " pod="openstack/swift-ring-rebalance-xs94r" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.316979 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/76f71d51-d887-428e-bcf0-e07a75cda134-ring-data-devices\") pod \"swift-ring-rebalance-xs94r\" (UID: \"76f71d51-d887-428e-bcf0-e07a75cda134\") " pod="openstack/swift-ring-rebalance-xs94r" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.318531 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/76f71d51-d887-428e-bcf0-e07a75cda134-scripts\") pod \"swift-ring-rebalance-xs94r\" (UID: \"76f71d51-d887-428e-bcf0-e07a75cda134\") " pod="openstack/swift-ring-rebalance-xs94r" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.318726 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/76f71d51-d887-428e-bcf0-e07a75cda134-dispersionconf\") pod \"swift-ring-rebalance-xs94r\" (UID: \"76f71d51-d887-428e-bcf0-e07a75cda134\") " pod="openstack/swift-ring-rebalance-xs94r" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.318773 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/76f71d51-d887-428e-bcf0-e07a75cda134-swiftconf\") pod \"swift-ring-rebalance-xs94r\" (UID: \"76f71d51-d887-428e-bcf0-e07a75cda134\") " pod="openstack/swift-ring-rebalance-xs94r" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.323455 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76f71d51-d887-428e-bcf0-e07a75cda134-combined-ca-bundle\") pod \"swift-ring-rebalance-xs94r\" (UID: \"76f71d51-d887-428e-bcf0-e07a75cda134\") " pod="openstack/swift-ring-rebalance-xs94r" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.337558 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k8wnk\" (UniqueName: 
\"kubernetes.io/projected/76f71d51-d887-428e-bcf0-e07a75cda134-kube-api-access-k8wnk\") pod \"swift-ring-rebalance-xs94r\" (UID: \"76f71d51-d887-428e-bcf0-e07a75cda134\") " pod="openstack/swift-ring-rebalance-xs94r" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.420471 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-xs94r" Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.594670 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-j7782"] Dec 13 06:48:17 crc kubenswrapper[5048]: W1213 06:48:17.603916 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod73c1e78c_8d3f_45d1_976e_284ae877919e.slice/crio-d77c7adab7b6d0102b976b681ba67ec52799e87c8dc5b5d5866e4e7e24f09be0 WatchSource:0}: Error finding container d77c7adab7b6d0102b976b681ba67ec52799e87c8dc5b5d5866e4e7e24f09be0: Status 404 returned error can't find the container with id d77c7adab7b6d0102b976b681ba67ec52799e87c8dc5b5d5866e4e7e24f09be0 Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.723052 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4-etc-swift\") pod \"swift-storage-0\" (UID: \"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4\") " pod="openstack/swift-storage-0" Dec 13 06:48:17 crc kubenswrapper[5048]: E1213 06:48:17.725071 5048 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 13 06:48:17 crc kubenswrapper[5048]: E1213 06:48:17.725087 5048 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 13 06:48:17 crc kubenswrapper[5048]: E1213 06:48:17.725138 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4-etc-swift podName:b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4 nodeName:}" failed. No retries permitted until 2025-12-13 06:48:18.725124436 +0000 UTC m=+1132.591719017 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4-etc-swift") pod "swift-storage-0" (UID: "b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4") : configmap "swift-ring-files" not found Dec 13 06:48:17 crc kubenswrapper[5048]: I1213 06:48:17.922499 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-xs94r"] Dec 13 06:48:18 crc kubenswrapper[5048]: I1213 06:48:18.223001 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-xs94r" event={"ID":"76f71d51-d887-428e-bcf0-e07a75cda134","Type":"ContainerStarted","Data":"0cd3daa0dfda4436dfe10e57a779dcb364097126a38ed53a0334d25a4fd899d2"} Dec 13 06:48:18 crc kubenswrapper[5048]: I1213 06:48:18.224429 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-j7782" event={"ID":"73c1e78c-8d3f-45d1-976e-284ae877919e","Type":"ContainerStarted","Data":"d77c7adab7b6d0102b976b681ba67ec52799e87c8dc5b5d5866e4e7e24f09be0"} Dec 13 06:48:18 crc kubenswrapper[5048]: I1213 06:48:18.740722 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4-etc-swift\") pod \"swift-storage-0\" (UID: \"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4\") " pod="openstack/swift-storage-0" Dec 13 06:48:18 crc kubenswrapper[5048]: E1213 06:48:18.740976 5048 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 13 06:48:18 crc kubenswrapper[5048]: E1213 06:48:18.741211 5048 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 13 06:48:18 crc kubenswrapper[5048]: E1213 06:48:18.741274 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4-etc-swift podName:b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4 nodeName:}" failed. No retries permitted until 2025-12-13 06:48:20.741251809 +0000 UTC m=+1134.607846380 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4-etc-swift") pod "swift-storage-0" (UID: "b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4") : configmap "swift-ring-files" not found Dec 13 06:48:18 crc kubenswrapper[5048]: I1213 06:48:18.839499 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-fvvwg" Dec 13 06:48:18 crc kubenswrapper[5048]: I1213 06:48:18.943913 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e70bef65-3e49-4e8c-8311-1c490a03fbe0-config\") pod \"e70bef65-3e49-4e8c-8311-1c490a03fbe0\" (UID: \"e70bef65-3e49-4e8c-8311-1c490a03fbe0\") " Dec 13 06:48:18 crc kubenswrapper[5048]: I1213 06:48:18.943981 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2sl8x\" (UniqueName: \"kubernetes.io/projected/e70bef65-3e49-4e8c-8311-1c490a03fbe0-kube-api-access-2sl8x\") pod \"e70bef65-3e49-4e8c-8311-1c490a03fbe0\" (UID: \"e70bef65-3e49-4e8c-8311-1c490a03fbe0\") " Dec 13 06:48:18 crc kubenswrapper[5048]: I1213 06:48:18.944093 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e70bef65-3e49-4e8c-8311-1c490a03fbe0-dns-svc\") pod \"e70bef65-3e49-4e8c-8311-1c490a03fbe0\" (UID: \"e70bef65-3e49-4e8c-8311-1c490a03fbe0\") " Dec 13 06:48:18 crc kubenswrapper[5048]: I1213 06:48:18.949979 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e70bef65-3e49-4e8c-8311-1c490a03fbe0-kube-api-access-2sl8x" (OuterVolumeSpecName: "kube-api-access-2sl8x") pod "e70bef65-3e49-4e8c-8311-1c490a03fbe0" (UID: "e70bef65-3e49-4e8c-8311-1c490a03fbe0"). InnerVolumeSpecName "kube-api-access-2sl8x". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:48:19 crc kubenswrapper[5048]: I1213 06:48:19.021332 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e70bef65-3e49-4e8c-8311-1c490a03fbe0-config" (OuterVolumeSpecName: "config") pod "e70bef65-3e49-4e8c-8311-1c490a03fbe0" (UID: "e70bef65-3e49-4e8c-8311-1c490a03fbe0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:48:19 crc kubenswrapper[5048]: I1213 06:48:19.026949 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e70bef65-3e49-4e8c-8311-1c490a03fbe0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e70bef65-3e49-4e8c-8311-1c490a03fbe0" (UID: "e70bef65-3e49-4e8c-8311-1c490a03fbe0"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:48:19 crc kubenswrapper[5048]: I1213 06:48:19.045906 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e70bef65-3e49-4e8c-8311-1c490a03fbe0-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:48:19 crc kubenswrapper[5048]: I1213 06:48:19.045931 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2sl8x\" (UniqueName: \"kubernetes.io/projected/e70bef65-3e49-4e8c-8311-1c490a03fbe0-kube-api-access-2sl8x\") on node \"crc\" DevicePath \"\"" Dec 13 06:48:19 crc kubenswrapper[5048]: I1213 06:48:19.045944 5048 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e70bef65-3e49-4e8c-8311-1c490a03fbe0-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 13 06:48:19 crc kubenswrapper[5048]: I1213 06:48:19.272964 5048 generic.go:334] "Generic (PLEG): container finished" podID="4e4bfc5f-4bb9-4cd0-8686-854d60a380a4" containerID="67d09d55b28e40d3532eb26640c5e0b97c5e9f0fdf2082c143a1ba2ba22218ee" exitCode=0 Dec 13 06:48:19 crc kubenswrapper[5048]: I1213 06:48:19.273034 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-dwpg4" event={"ID":"4e4bfc5f-4bb9-4cd0-8686-854d60a380a4","Type":"ContainerDied","Data":"67d09d55b28e40d3532eb26640c5e0b97c5e9f0fdf2082c143a1ba2ba22218ee"} Dec 13 06:48:19 crc kubenswrapper[5048]: I1213 06:48:19.297903 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-l4wtt" event={"ID":"0da0172c-5a76-44fc-8ff1-e694ba1e083b","Type":"ContainerStarted","Data":"39cc15bb8cc05977f4549440fe68e347a9eeb3c2ebf98aba58deb26bea6a76ce"} Dec 13 06:48:19 crc kubenswrapper[5048]: I1213 06:48:19.312521 5048 generic.go:334] "Generic (PLEG): container finished" podID="0fe36828-9531-4104-bd23-fdc492007087" containerID="addff9884945839745b164432ad7ee6a0fae6b7d1e43110be5c82067b5589f5d" exitCode=0 Dec 13 06:48:19 crc kubenswrapper[5048]: I1213 06:48:19.312624 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-nrmr8" event={"ID":"0fe36828-9531-4104-bd23-fdc492007087","Type":"ContainerDied","Data":"addff9884945839745b164432ad7ee6a0fae6b7d1e43110be5c82067b5589f5d"} Dec 13 06:48:19 crc kubenswrapper[5048]: I1213 06:48:19.337903 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-fvvwg" Dec 13 06:48:19 crc kubenswrapper[5048]: I1213 06:48:19.338062 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-fvvwg" event={"ID":"e70bef65-3e49-4e8c-8311-1c490a03fbe0","Type":"ContainerDied","Data":"384d0b0ff7c45c873bc67106848a894adbef57e91ced3e668311cea43ce6639a"} Dec 13 06:48:19 crc kubenswrapper[5048]: I1213 06:48:19.338107 5048 scope.go:117] "RemoveContainer" containerID="2fcdbbf57702d191125b922a5d46f0306170c719650d88372f50f40f6aed9c25" Dec 13 06:48:19 crc kubenswrapper[5048]: I1213 06:48:19.346075 5048 generic.go:334] "Generic (PLEG): container finished" podID="73c1e78c-8d3f-45d1-976e-284ae877919e" containerID="283d10bcbd3be3474e0ad1377274a2d802168b7709ad649dff2c63f3dbb6e82e" exitCode=0 Dec 13 06:48:19 crc kubenswrapper[5048]: I1213 06:48:19.346120 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-j7782" event={"ID":"73c1e78c-8d3f-45d1-976e-284ae877919e","Type":"ContainerDied","Data":"283d10bcbd3be3474e0ad1377274a2d802168b7709ad649dff2c63f3dbb6e82e"} Dec 13 06:48:19 crc kubenswrapper[5048]: I1213 06:48:19.428608 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-l4wtt" podStartSLOduration=6.428574788 podStartE2EDuration="6.428574788s" podCreationTimestamp="2025-12-13 06:48:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:48:19.367626164 +0000 UTC m=+1133.234220745" watchObservedRunningTime="2025-12-13 06:48:19.428574788 +0000 UTC m=+1133.295169369" Dec 13 06:48:19 crc kubenswrapper[5048]: I1213 06:48:19.438665 5048 scope.go:117] "RemoveContainer" containerID="7bba1a3f4ced5a87b6478d79016f90b501dea92d78398496f46f9cac138df0e9" Dec 13 06:48:19 crc kubenswrapper[5048]: I1213 06:48:19.481153 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-fvvwg"] Dec 13 06:48:19 crc kubenswrapper[5048]: I1213 06:48:19.493886 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-fvvwg"] Dec 13 06:48:20 crc kubenswrapper[5048]: I1213 06:48:20.003663 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-nrmr8" Dec 13 06:48:20 crc kubenswrapper[5048]: I1213 06:48:20.065112 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0fe36828-9531-4104-bd23-fdc492007087-dns-svc\") pod \"0fe36828-9531-4104-bd23-fdc492007087\" (UID: \"0fe36828-9531-4104-bd23-fdc492007087\") " Dec 13 06:48:20 crc kubenswrapper[5048]: I1213 06:48:20.065204 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0fe36828-9531-4104-bd23-fdc492007087-ovsdbserver-nb\") pod \"0fe36828-9531-4104-bd23-fdc492007087\" (UID: \"0fe36828-9531-4104-bd23-fdc492007087\") " Dec 13 06:48:20 crc kubenswrapper[5048]: I1213 06:48:20.065239 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-49flj\" (UniqueName: \"kubernetes.io/projected/0fe36828-9531-4104-bd23-fdc492007087-kube-api-access-49flj\") pod \"0fe36828-9531-4104-bd23-fdc492007087\" (UID: \"0fe36828-9531-4104-bd23-fdc492007087\") " Dec 13 06:48:20 crc kubenswrapper[5048]: I1213 06:48:20.065404 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0fe36828-9531-4104-bd23-fdc492007087-config\") pod \"0fe36828-9531-4104-bd23-fdc492007087\" (UID: \"0fe36828-9531-4104-bd23-fdc492007087\") " Dec 13 06:48:20 crc kubenswrapper[5048]: I1213 06:48:20.073122 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0fe36828-9531-4104-bd23-fdc492007087-kube-api-access-49flj" (OuterVolumeSpecName: "kube-api-access-49flj") pod "0fe36828-9531-4104-bd23-fdc492007087" (UID: "0fe36828-9531-4104-bd23-fdc492007087"). InnerVolumeSpecName "kube-api-access-49flj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:48:20 crc kubenswrapper[5048]: I1213 06:48:20.092778 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0fe36828-9531-4104-bd23-fdc492007087-config" (OuterVolumeSpecName: "config") pod "0fe36828-9531-4104-bd23-fdc492007087" (UID: "0fe36828-9531-4104-bd23-fdc492007087"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:48:20 crc kubenswrapper[5048]: I1213 06:48:20.096093 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0fe36828-9531-4104-bd23-fdc492007087-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0fe36828-9531-4104-bd23-fdc492007087" (UID: "0fe36828-9531-4104-bd23-fdc492007087"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:48:20 crc kubenswrapper[5048]: I1213 06:48:20.100782 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0fe36828-9531-4104-bd23-fdc492007087-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "0fe36828-9531-4104-bd23-fdc492007087" (UID: "0fe36828-9531-4104-bd23-fdc492007087"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:48:20 crc kubenswrapper[5048]: I1213 06:48:20.168822 5048 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0fe36828-9531-4104-bd23-fdc492007087-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 13 06:48:20 crc kubenswrapper[5048]: I1213 06:48:20.168856 5048 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0fe36828-9531-4104-bd23-fdc492007087-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 13 06:48:20 crc kubenswrapper[5048]: I1213 06:48:20.168869 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-49flj\" (UniqueName: \"kubernetes.io/projected/0fe36828-9531-4104-bd23-fdc492007087-kube-api-access-49flj\") on node \"crc\" DevicePath \"\"" Dec 13 06:48:20 crc kubenswrapper[5048]: I1213 06:48:20.168882 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0fe36828-9531-4104-bd23-fdc492007087-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:48:20 crc kubenswrapper[5048]: E1213 06:48:20.256697 5048 log.go:32] "CreateContainer in sandbox from runtime service failed" err=< Dec 13 06:48:20 crc kubenswrapper[5048]: rpc error: code = Unknown desc = container create failed: mount `/var/lib/kubelet/pods/4e4bfc5f-4bb9-4cd0-8686-854d60a380a4/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Dec 13 06:48:20 crc kubenswrapper[5048]: > podSandboxID="0bfca8647b79d07ebb81578a9afe71a866ca78f61eccf72a93c66e66c1c0b594" Dec 13 06:48:20 crc kubenswrapper[5048]: E1213 06:48:20.256869 5048 kuberuntime_manager.go:1274] "Unhandled Error" err=< Dec 13 06:48:20 crc kubenswrapper[5048]: container &Container{Name:dnsmasq-dns,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv 
--log-queries],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n654h99h64ch5dbh6dh555h587h64bh5cfh647h5fdh57ch679h9h597h5f5hbch59bh54fh575h566h667h586h5f5h65ch5bch57h68h65ch58bh694h5cfq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/ovsdbserver-nb,SubPath:ovsdbserver-nb,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-sb,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/ovsdbserver-sb,SubPath:ovsdbserver-sb,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rpzt6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-8554648995-dwpg4_openstack(4e4bfc5f-4bb9-4cd0-8686-854d60a380a4): CreateContainerError: container create failed: mount `/var/lib/kubelet/pods/4e4bfc5f-4bb9-4cd0-8686-854d60a380a4/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Dec 13 06:48:20 crc kubenswrapper[5048]: > logger="UnhandledError" Dec 13 06:48:20 crc kubenswrapper[5048]: E1213 06:48:20.258046 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dnsmasq-dns\" with CreateContainerError: \"container create failed: mount `/var/lib/kubelet/pods/4e4bfc5f-4bb9-4cd0-8686-854d60a380a4/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory\\n\"" pod="openstack/dnsmasq-dns-8554648995-dwpg4" podUID="4e4bfc5f-4bb9-4cd0-8686-854d60a380a4" Dec 13 06:48:20 crc kubenswrapper[5048]: I1213 06:48:20.366187 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/dnsmasq-dns-b8fbc5445-j7782" event={"ID":"73c1e78c-8d3f-45d1-976e-284ae877919e","Type":"ContainerStarted","Data":"0f3e8e0048d88d3438413b9205bc74cd2cefd5c1ecd653dcb34749470109e453"} Dec 13 06:48:20 crc kubenswrapper[5048]: I1213 06:48:20.366310 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-b8fbc5445-j7782" Dec 13 06:48:20 crc kubenswrapper[5048]: I1213 06:48:20.368604 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"3f002b19-41eb-4af1-a6c1-a4639e81417e","Type":"ContainerStarted","Data":"9644f0c062773160c86b4a48de7db271a6c520ad6f61155b7ca9e045cbe42a66"} Dec 13 06:48:20 crc kubenswrapper[5048]: I1213 06:48:20.371970 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-nrmr8" event={"ID":"0fe36828-9531-4104-bd23-fdc492007087","Type":"ContainerDied","Data":"0b0f6ee46894b0f3111039585b853e0cdcc3fd3d9ee8998d0266d5029246856e"} Dec 13 06:48:20 crc kubenswrapper[5048]: I1213 06:48:20.372002 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-nrmr8" Dec 13 06:48:20 crc kubenswrapper[5048]: I1213 06:48:20.372019 5048 scope.go:117] "RemoveContainer" containerID="addff9884945839745b164432ad7ee6a0fae6b7d1e43110be5c82067b5589f5d" Dec 13 06:48:20 crc kubenswrapper[5048]: I1213 06:48:20.387593 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-b8fbc5445-j7782" podStartSLOduration=5.387575331 podStartE2EDuration="5.387575331s" podCreationTimestamp="2025-12-13 06:48:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:48:20.386916843 +0000 UTC m=+1134.253511424" watchObservedRunningTime="2025-12-13 06:48:20.387575331 +0000 UTC m=+1134.254169902" Dec 13 06:48:20 crc kubenswrapper[5048]: I1213 06:48:20.443631 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-nrmr8"] Dec 13 06:48:20 crc kubenswrapper[5048]: I1213 06:48:20.458753 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-nrmr8"] Dec 13 06:48:20 crc kubenswrapper[5048]: I1213 06:48:20.615694 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0fe36828-9531-4104-bd23-fdc492007087" path="/var/lib/kubelet/pods/0fe36828-9531-4104-bd23-fdc492007087/volumes" Dec 13 06:48:20 crc kubenswrapper[5048]: I1213 06:48:20.616931 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e70bef65-3e49-4e8c-8311-1c490a03fbe0" path="/var/lib/kubelet/pods/e70bef65-3e49-4e8c-8311-1c490a03fbe0/volumes" Dec 13 06:48:20 crc kubenswrapper[5048]: I1213 06:48:20.784979 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4-etc-swift\") pod \"swift-storage-0\" (UID: \"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4\") " pod="openstack/swift-storage-0" Dec 13 06:48:20 crc kubenswrapper[5048]: E1213 06:48:20.785203 5048 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 13 06:48:20 crc kubenswrapper[5048]: E1213 06:48:20.785245 5048 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 13 06:48:20 crc kubenswrapper[5048]: E1213 06:48:20.785306 
5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4-etc-swift podName:b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4 nodeName:}" failed. No retries permitted until 2025-12-13 06:48:24.785286597 +0000 UTC m=+1138.651881178 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4-etc-swift") pod "swift-storage-0" (UID: "b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4") : configmap "swift-ring-files" not found Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.118777 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-nztt9"] Dec 13 06:48:23 crc kubenswrapper[5048]: E1213 06:48:23.120970 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0fe36828-9531-4104-bd23-fdc492007087" containerName="init" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.121069 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="0fe36828-9531-4104-bd23-fdc492007087" containerName="init" Dec 13 06:48:23 crc kubenswrapper[5048]: E1213 06:48:23.121163 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e70bef65-3e49-4e8c-8311-1c490a03fbe0" containerName="init" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.121236 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="e70bef65-3e49-4e8c-8311-1c490a03fbe0" containerName="init" Dec 13 06:48:23 crc kubenswrapper[5048]: E1213 06:48:23.121314 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e70bef65-3e49-4e8c-8311-1c490a03fbe0" containerName="dnsmasq-dns" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.121390 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="e70bef65-3e49-4e8c-8311-1c490a03fbe0" containerName="dnsmasq-dns" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.121679 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="0fe36828-9531-4104-bd23-fdc492007087" containerName="init" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.121777 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="e70bef65-3e49-4e8c-8311-1c490a03fbe0" containerName="dnsmasq-dns" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.122576 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-nztt9" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.135484 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-nztt9"] Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.148961 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-54a6-account-create-update-fvkg9"] Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.150390 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-54a6-account-create-update-fvkg9" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.153578 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.166502 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-54a6-account-create-update-fvkg9"] Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.230418 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-df8m2\" (UniqueName: \"kubernetes.io/projected/a731241e-7fd1-45cb-9204-e71ff2a62a91-kube-api-access-df8m2\") pod \"keystone-54a6-account-create-update-fvkg9\" (UID: \"a731241e-7fd1-45cb-9204-e71ff2a62a91\") " pod="openstack/keystone-54a6-account-create-update-fvkg9" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.230808 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zw5x4\" (UniqueName: \"kubernetes.io/projected/9b80676d-e344-4949-8a27-22381100d354-kube-api-access-zw5x4\") pod \"keystone-db-create-nztt9\" (UID: \"9b80676d-e344-4949-8a27-22381100d354\") " pod="openstack/keystone-db-create-nztt9" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.230973 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a731241e-7fd1-45cb-9204-e71ff2a62a91-operator-scripts\") pod \"keystone-54a6-account-create-update-fvkg9\" (UID: \"a731241e-7fd1-45cb-9204-e71ff2a62a91\") " pod="openstack/keystone-54a6-account-create-update-fvkg9" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.231130 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b80676d-e344-4949-8a27-22381100d354-operator-scripts\") pod \"keystone-db-create-nztt9\" (UID: \"9b80676d-e344-4949-8a27-22381100d354\") " pod="openstack/keystone-db-create-nztt9" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.310855 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-bfzrz"] Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.311773 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-bfzrz" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.320446 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-bfzrz"] Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.332444 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-df8m2\" (UniqueName: \"kubernetes.io/projected/a731241e-7fd1-45cb-9204-e71ff2a62a91-kube-api-access-df8m2\") pod \"keystone-54a6-account-create-update-fvkg9\" (UID: \"a731241e-7fd1-45cb-9204-e71ff2a62a91\") " pod="openstack/keystone-54a6-account-create-update-fvkg9" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.332693 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zw5x4\" (UniqueName: \"kubernetes.io/projected/9b80676d-e344-4949-8a27-22381100d354-kube-api-access-zw5x4\") pod \"keystone-db-create-nztt9\" (UID: \"9b80676d-e344-4949-8a27-22381100d354\") " pod="openstack/keystone-db-create-nztt9" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.332834 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a731241e-7fd1-45cb-9204-e71ff2a62a91-operator-scripts\") pod \"keystone-54a6-account-create-update-fvkg9\" (UID: \"a731241e-7fd1-45cb-9204-e71ff2a62a91\") " pod="openstack/keystone-54a6-account-create-update-fvkg9" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.332918 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b80676d-e344-4949-8a27-22381100d354-operator-scripts\") pod \"keystone-db-create-nztt9\" (UID: \"9b80676d-e344-4949-8a27-22381100d354\") " pod="openstack/keystone-db-create-nztt9" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.333784 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b80676d-e344-4949-8a27-22381100d354-operator-scripts\") pod \"keystone-db-create-nztt9\" (UID: \"9b80676d-e344-4949-8a27-22381100d354\") " pod="openstack/keystone-db-create-nztt9" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.333889 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a731241e-7fd1-45cb-9204-e71ff2a62a91-operator-scripts\") pod \"keystone-54a6-account-create-update-fvkg9\" (UID: \"a731241e-7fd1-45cb-9204-e71ff2a62a91\") " pod="openstack/keystone-54a6-account-create-update-fvkg9" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.351380 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zw5x4\" (UniqueName: \"kubernetes.io/projected/9b80676d-e344-4949-8a27-22381100d354-kube-api-access-zw5x4\") pod \"keystone-db-create-nztt9\" (UID: \"9b80676d-e344-4949-8a27-22381100d354\") " pod="openstack/keystone-db-create-nztt9" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.353711 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-df8m2\" (UniqueName: \"kubernetes.io/projected/a731241e-7fd1-45cb-9204-e71ff2a62a91-kube-api-access-df8m2\") pod \"keystone-54a6-account-create-update-fvkg9\" (UID: \"a731241e-7fd1-45cb-9204-e71ff2a62a91\") " pod="openstack/keystone-54a6-account-create-update-fvkg9" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.393519 5048 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/ovn-northd-0" event={"ID":"3f002b19-41eb-4af1-a6c1-a4639e81417e","Type":"ContainerStarted","Data":"ab29d93cf8fabe54637b1d6cda279a3d74a0c6f265544f1db83e9ab2e82c4189"} Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.393691 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.397869 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-xs94r" event={"ID":"76f71d51-d887-428e-bcf0-e07a75cda134","Type":"ContainerStarted","Data":"e20c99d9f28d3fe7aeb127ebec094b91b05ef295539a0ac60516d368f72fbab9"} Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.399246 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-dwpg4" event={"ID":"4e4bfc5f-4bb9-4cd0-8686-854d60a380a4","Type":"ContainerStarted","Data":"e030a9bce163962e4c0b52716c274fbbd5131790ef1734d5b652f81e375db45f"} Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.399420 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8554648995-dwpg4" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.426377 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=5.378598406 podStartE2EDuration="10.426353362s" podCreationTimestamp="2025-12-13 06:48:13 +0000 UTC" firstStartedPulling="2025-12-13 06:48:14.94917695 +0000 UTC m=+1128.815771531" lastFinishedPulling="2025-12-13 06:48:19.996931906 +0000 UTC m=+1133.863526487" observedRunningTime="2025-12-13 06:48:23.422190359 +0000 UTC m=+1137.288784940" watchObservedRunningTime="2025-12-13 06:48:23.426353362 +0000 UTC m=+1137.292947953" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.436067 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tj58w\" (UniqueName: \"kubernetes.io/projected/19c49aa3-f62c-4c78-a8b7-a6858f7da04e-kube-api-access-tj58w\") pod \"placement-db-create-bfzrz\" (UID: \"19c49aa3-f62c-4c78-a8b7-a6858f7da04e\") " pod="openstack/placement-db-create-bfzrz" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.436507 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/19c49aa3-f62c-4c78-a8b7-a6858f7da04e-operator-scripts\") pod \"placement-db-create-bfzrz\" (UID: \"19c49aa3-f62c-4c78-a8b7-a6858f7da04e\") " pod="openstack/placement-db-create-bfzrz" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.452903 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-nztt9" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.461898 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-dac3-account-create-update-nrtk8"] Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.463228 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-dac3-account-create-update-nrtk8" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.465455 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.467305 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-54a6-account-create-update-fvkg9" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.480109 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-dac3-account-create-update-nrtk8"] Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.490061 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8554648995-dwpg4" podStartSLOduration=10.490041 podStartE2EDuration="10.490041s" podCreationTimestamp="2025-12-13 06:48:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:48:23.475010622 +0000 UTC m=+1137.341605223" watchObservedRunningTime="2025-12-13 06:48:23.490041 +0000 UTC m=+1137.356635591" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.494625 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-xs94r" podStartSLOduration=1.7484863769999999 podStartE2EDuration="6.494605974s" podCreationTimestamp="2025-12-13 06:48:17 +0000 UTC" firstStartedPulling="2025-12-13 06:48:17.930989245 +0000 UTC m=+1131.797583826" lastFinishedPulling="2025-12-13 06:48:22.677108842 +0000 UTC m=+1136.543703423" observedRunningTime="2025-12-13 06:48:23.491713186 +0000 UTC m=+1137.358307777" watchObservedRunningTime="2025-12-13 06:48:23.494605974 +0000 UTC m=+1137.361200565" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.537774 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hhxjb\" (UniqueName: \"kubernetes.io/projected/d790a6f6-a1d9-4f9c-84ae-de2358b07262-kube-api-access-hhxjb\") pod \"placement-dac3-account-create-update-nrtk8\" (UID: \"d790a6f6-a1d9-4f9c-84ae-de2358b07262\") " pod="openstack/placement-dac3-account-create-update-nrtk8" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.538070 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/19c49aa3-f62c-4c78-a8b7-a6858f7da04e-operator-scripts\") pod \"placement-db-create-bfzrz\" (UID: \"19c49aa3-f62c-4c78-a8b7-a6858f7da04e\") " pod="openstack/placement-db-create-bfzrz" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.538114 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d790a6f6-a1d9-4f9c-84ae-de2358b07262-operator-scripts\") pod \"placement-dac3-account-create-update-nrtk8\" (UID: \"d790a6f6-a1d9-4f9c-84ae-de2358b07262\") " pod="openstack/placement-dac3-account-create-update-nrtk8" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.538169 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tj58w\" (UniqueName: \"kubernetes.io/projected/19c49aa3-f62c-4c78-a8b7-a6858f7da04e-kube-api-access-tj58w\") pod \"placement-db-create-bfzrz\" (UID: \"19c49aa3-f62c-4c78-a8b7-a6858f7da04e\") " pod="openstack/placement-db-create-bfzrz" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.541363 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/19c49aa3-f62c-4c78-a8b7-a6858f7da04e-operator-scripts\") pod \"placement-db-create-bfzrz\" (UID: \"19c49aa3-f62c-4c78-a8b7-a6858f7da04e\") " pod="openstack/placement-db-create-bfzrz" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 
06:48:23.559784 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tj58w\" (UniqueName: \"kubernetes.io/projected/19c49aa3-f62c-4c78-a8b7-a6858f7da04e-kube-api-access-tj58w\") pod \"placement-db-create-bfzrz\" (UID: \"19c49aa3-f62c-4c78-a8b7-a6858f7da04e\") " pod="openstack/placement-db-create-bfzrz" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.626885 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-bfzrz" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.639892 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d790a6f6-a1d9-4f9c-84ae-de2358b07262-operator-scripts\") pod \"placement-dac3-account-create-update-nrtk8\" (UID: \"d790a6f6-a1d9-4f9c-84ae-de2358b07262\") " pod="openstack/placement-dac3-account-create-update-nrtk8" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.640027 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hhxjb\" (UniqueName: \"kubernetes.io/projected/d790a6f6-a1d9-4f9c-84ae-de2358b07262-kube-api-access-hhxjb\") pod \"placement-dac3-account-create-update-nrtk8\" (UID: \"d790a6f6-a1d9-4f9c-84ae-de2358b07262\") " pod="openstack/placement-dac3-account-create-update-nrtk8" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.641237 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d790a6f6-a1d9-4f9c-84ae-de2358b07262-operator-scripts\") pod \"placement-dac3-account-create-update-nrtk8\" (UID: \"d790a6f6-a1d9-4f9c-84ae-de2358b07262\") " pod="openstack/placement-dac3-account-create-update-nrtk8" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.661155 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hhxjb\" (UniqueName: \"kubernetes.io/projected/d790a6f6-a1d9-4f9c-84ae-de2358b07262-kube-api-access-hhxjb\") pod \"placement-dac3-account-create-update-nrtk8\" (UID: \"d790a6f6-a1d9-4f9c-84ae-de2358b07262\") " pod="openstack/placement-dac3-account-create-update-nrtk8" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.702508 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-rqc56"] Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.703841 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-rqc56" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.735991 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-rqc56"] Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.801755 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-cb17-account-create-update-rhz4v"] Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.813330 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-dac3-account-create-update-nrtk8" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.814044 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-cb17-account-create-update-rhz4v" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.820361 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-cb17-account-create-update-rhz4v"] Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.822820 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.854300 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c65wg\" (UniqueName: \"kubernetes.io/projected/a1efce17-5954-4d64-b35e-8ac14d24e7b0-kube-api-access-c65wg\") pod \"glance-db-create-rqc56\" (UID: \"a1efce17-5954-4d64-b35e-8ac14d24e7b0\") " pod="openstack/glance-db-create-rqc56" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.854471 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a1efce17-5954-4d64-b35e-8ac14d24e7b0-operator-scripts\") pod \"glance-db-create-rqc56\" (UID: \"a1efce17-5954-4d64-b35e-8ac14d24e7b0\") " pod="openstack/glance-db-create-rqc56" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.955829 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a1efce17-5954-4d64-b35e-8ac14d24e7b0-operator-scripts\") pod \"glance-db-create-rqc56\" (UID: \"a1efce17-5954-4d64-b35e-8ac14d24e7b0\") " pod="openstack/glance-db-create-rqc56" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.955898 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rp57g\" (UniqueName: \"kubernetes.io/projected/d5ec0a88-0ac6-437e-8aa2-293ac501fadc-kube-api-access-rp57g\") pod \"glance-cb17-account-create-update-rhz4v\" (UID: \"d5ec0a88-0ac6-437e-8aa2-293ac501fadc\") " pod="openstack/glance-cb17-account-create-update-rhz4v" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.955963 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d5ec0a88-0ac6-437e-8aa2-293ac501fadc-operator-scripts\") pod \"glance-cb17-account-create-update-rhz4v\" (UID: \"d5ec0a88-0ac6-437e-8aa2-293ac501fadc\") " pod="openstack/glance-cb17-account-create-update-rhz4v" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.955986 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c65wg\" (UniqueName: \"kubernetes.io/projected/a1efce17-5954-4d64-b35e-8ac14d24e7b0-kube-api-access-c65wg\") pod \"glance-db-create-rqc56\" (UID: \"a1efce17-5954-4d64-b35e-8ac14d24e7b0\") " pod="openstack/glance-db-create-rqc56" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.958193 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a1efce17-5954-4d64-b35e-8ac14d24e7b0-operator-scripts\") pod \"glance-db-create-rqc56\" (UID: \"a1efce17-5954-4d64-b35e-8ac14d24e7b0\") " pod="openstack/glance-db-create-rqc56" Dec 13 06:48:23 crc kubenswrapper[5048]: I1213 06:48:23.976762 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c65wg\" (UniqueName: \"kubernetes.io/projected/a1efce17-5954-4d64-b35e-8ac14d24e7b0-kube-api-access-c65wg\") pod \"glance-db-create-rqc56\" (UID: 
\"a1efce17-5954-4d64-b35e-8ac14d24e7b0\") " pod="openstack/glance-db-create-rqc56" Dec 13 06:48:24 crc kubenswrapper[5048]: I1213 06:48:24.018393 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-54a6-account-create-update-fvkg9"] Dec 13 06:48:24 crc kubenswrapper[5048]: W1213 06:48:24.020542 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda731241e_7fd1_45cb_9204_e71ff2a62a91.slice/crio-7385ecf7df53531fec2f83780cef606cb79d60ba219e23626379f1caa6ed76fd WatchSource:0}: Error finding container 7385ecf7df53531fec2f83780cef606cb79d60ba219e23626379f1caa6ed76fd: Status 404 returned error can't find the container with id 7385ecf7df53531fec2f83780cef606cb79d60ba219e23626379f1caa6ed76fd Dec 13 06:48:24 crc kubenswrapper[5048]: I1213 06:48:24.059459 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d5ec0a88-0ac6-437e-8aa2-293ac501fadc-operator-scripts\") pod \"glance-cb17-account-create-update-rhz4v\" (UID: \"d5ec0a88-0ac6-437e-8aa2-293ac501fadc\") " pod="openstack/glance-cb17-account-create-update-rhz4v" Dec 13 06:48:24 crc kubenswrapper[5048]: I1213 06:48:24.059768 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rp57g\" (UniqueName: \"kubernetes.io/projected/d5ec0a88-0ac6-437e-8aa2-293ac501fadc-kube-api-access-rp57g\") pod \"glance-cb17-account-create-update-rhz4v\" (UID: \"d5ec0a88-0ac6-437e-8aa2-293ac501fadc\") " pod="openstack/glance-cb17-account-create-update-rhz4v" Dec 13 06:48:24 crc kubenswrapper[5048]: I1213 06:48:24.060616 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d5ec0a88-0ac6-437e-8aa2-293ac501fadc-operator-scripts\") pod \"glance-cb17-account-create-update-rhz4v\" (UID: \"d5ec0a88-0ac6-437e-8aa2-293ac501fadc\") " pod="openstack/glance-cb17-account-create-update-rhz4v" Dec 13 06:48:24 crc kubenswrapper[5048]: I1213 06:48:24.061331 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-nztt9"] Dec 13 06:48:24 crc kubenswrapper[5048]: W1213 06:48:24.063607 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9b80676d_e344_4949_8a27_22381100d354.slice/crio-6929f60e7da791e4b89cd42b4641daa980ff69dd66b975ad5c05dd1e62cea38e WatchSource:0}: Error finding container 6929f60e7da791e4b89cd42b4641daa980ff69dd66b975ad5c05dd1e62cea38e: Status 404 returned error can't find the container with id 6929f60e7da791e4b89cd42b4641daa980ff69dd66b975ad5c05dd1e62cea38e Dec 13 06:48:24 crc kubenswrapper[5048]: I1213 06:48:24.078339 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rp57g\" (UniqueName: \"kubernetes.io/projected/d5ec0a88-0ac6-437e-8aa2-293ac501fadc-kube-api-access-rp57g\") pod \"glance-cb17-account-create-update-rhz4v\" (UID: \"d5ec0a88-0ac6-437e-8aa2-293ac501fadc\") " pod="openstack/glance-cb17-account-create-update-rhz4v" Dec 13 06:48:24 crc kubenswrapper[5048]: I1213 06:48:24.083024 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-rqc56" Dec 13 06:48:24 crc kubenswrapper[5048]: I1213 06:48:24.173970 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-cb17-account-create-update-rhz4v" Dec 13 06:48:24 crc kubenswrapper[5048]: I1213 06:48:24.282086 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-bfzrz"] Dec 13 06:48:24 crc kubenswrapper[5048]: I1213 06:48:24.333930 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-dac3-account-create-update-nrtk8"] Dec 13 06:48:24 crc kubenswrapper[5048]: W1213 06:48:24.340637 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd790a6f6_a1d9_4f9c_84ae_de2358b07262.slice/crio-8b61d397dd47274d06c1c416ac856a5e204c4c9bf6831fa001fb033b0e0830ce WatchSource:0}: Error finding container 8b61d397dd47274d06c1c416ac856a5e204c4c9bf6831fa001fb033b0e0830ce: Status 404 returned error can't find the container with id 8b61d397dd47274d06c1c416ac856a5e204c4c9bf6831fa001fb033b0e0830ce Dec 13 06:48:24 crc kubenswrapper[5048]: I1213 06:48:24.418016 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-dac3-account-create-update-nrtk8" event={"ID":"d790a6f6-a1d9-4f9c-84ae-de2358b07262","Type":"ContainerStarted","Data":"8b61d397dd47274d06c1c416ac856a5e204c4c9bf6831fa001fb033b0e0830ce"} Dec 13 06:48:24 crc kubenswrapper[5048]: I1213 06:48:24.419739 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-54a6-account-create-update-fvkg9" event={"ID":"a731241e-7fd1-45cb-9204-e71ff2a62a91","Type":"ContainerStarted","Data":"7385ecf7df53531fec2f83780cef606cb79d60ba219e23626379f1caa6ed76fd"} Dec 13 06:48:24 crc kubenswrapper[5048]: I1213 06:48:24.422541 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-nztt9" event={"ID":"9b80676d-e344-4949-8a27-22381100d354","Type":"ContainerStarted","Data":"6929f60e7da791e4b89cd42b4641daa980ff69dd66b975ad5c05dd1e62cea38e"} Dec 13 06:48:24 crc kubenswrapper[5048]: I1213 06:48:24.424389 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-bfzrz" event={"ID":"19c49aa3-f62c-4c78-a8b7-a6858f7da04e","Type":"ContainerStarted","Data":"81f422e7da63c58bdb4a2b34cc38c9cee4aac606794c392eb57eb01eaa07024f"} Dec 13 06:48:24 crc kubenswrapper[5048]: I1213 06:48:24.529293 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-rqc56"] Dec 13 06:48:24 crc kubenswrapper[5048]: W1213 06:48:24.533541 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda1efce17_5954_4d64_b35e_8ac14d24e7b0.slice/crio-8550d0bf7e3fc689685fe596c17404e8a58057471a3c8d21349bc1e525a976c4 WatchSource:0}: Error finding container 8550d0bf7e3fc689685fe596c17404e8a58057471a3c8d21349bc1e525a976c4: Status 404 returned error can't find the container with id 8550d0bf7e3fc689685fe596c17404e8a58057471a3c8d21349bc1e525a976c4 Dec 13 06:48:24 crc kubenswrapper[5048]: I1213 06:48:24.704047 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-cb17-account-create-update-rhz4v"] Dec 13 06:48:24 crc kubenswrapper[5048]: I1213 06:48:24.879795 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4-etc-swift\") pod \"swift-storage-0\" (UID: \"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4\") " pod="openstack/swift-storage-0" Dec 13 06:48:24 crc kubenswrapper[5048]: E1213 06:48:24.879965 5048 projected.go:288] 
Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 13 06:48:24 crc kubenswrapper[5048]: E1213 06:48:24.879989 5048 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 13 06:48:24 crc kubenswrapper[5048]: E1213 06:48:24.880033 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4-etc-swift podName:b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4 nodeName:}" failed. No retries permitted until 2025-12-13 06:48:32.880016693 +0000 UTC m=+1146.746611264 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4-etc-swift") pod "swift-storage-0" (UID: "b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4") : configmap "swift-ring-files" not found Dec 13 06:48:25 crc kubenswrapper[5048]: I1213 06:48:25.432937 5048 generic.go:334] "Generic (PLEG): container finished" podID="d790a6f6-a1d9-4f9c-84ae-de2358b07262" containerID="0392c1cbe1fce800c6d0e7030ce49be24f27d534b3da8652ab77cd70f5b9786f" exitCode=0 Dec 13 06:48:25 crc kubenswrapper[5048]: I1213 06:48:25.433517 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-dac3-account-create-update-nrtk8" event={"ID":"d790a6f6-a1d9-4f9c-84ae-de2358b07262","Type":"ContainerDied","Data":"0392c1cbe1fce800c6d0e7030ce49be24f27d534b3da8652ab77cd70f5b9786f"} Dec 13 06:48:25 crc kubenswrapper[5048]: I1213 06:48:25.434889 5048 generic.go:334] "Generic (PLEG): container finished" podID="a1efce17-5954-4d64-b35e-8ac14d24e7b0" containerID="47d1ae9ee992ee9bdd13d9886f9bd8fc67cf382bad934b6083cbef27680f3337" exitCode=0 Dec 13 06:48:25 crc kubenswrapper[5048]: I1213 06:48:25.434966 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-rqc56" event={"ID":"a1efce17-5954-4d64-b35e-8ac14d24e7b0","Type":"ContainerDied","Data":"47d1ae9ee992ee9bdd13d9886f9bd8fc67cf382bad934b6083cbef27680f3337"} Dec 13 06:48:25 crc kubenswrapper[5048]: I1213 06:48:25.434989 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-rqc56" event={"ID":"a1efce17-5954-4d64-b35e-8ac14d24e7b0","Type":"ContainerStarted","Data":"8550d0bf7e3fc689685fe596c17404e8a58057471a3c8d21349bc1e525a976c4"} Dec 13 06:48:25 crc kubenswrapper[5048]: I1213 06:48:25.436781 5048 generic.go:334] "Generic (PLEG): container finished" podID="a731241e-7fd1-45cb-9204-e71ff2a62a91" containerID="5e3952e75d057ef17b2c6beb8312bdb58d29b83e81bff8533cd0487bdc7b480c" exitCode=0 Dec 13 06:48:25 crc kubenswrapper[5048]: I1213 06:48:25.436861 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-54a6-account-create-update-fvkg9" event={"ID":"a731241e-7fd1-45cb-9204-e71ff2a62a91","Type":"ContainerDied","Data":"5e3952e75d057ef17b2c6beb8312bdb58d29b83e81bff8533cd0487bdc7b480c"} Dec 13 06:48:25 crc kubenswrapper[5048]: I1213 06:48:25.438301 5048 generic.go:334] "Generic (PLEG): container finished" podID="d5ec0a88-0ac6-437e-8aa2-293ac501fadc" containerID="2f9aeda042cb1889d878094a98cbcea9dac862690670083b09197057618a41ee" exitCode=0 Dec 13 06:48:25 crc kubenswrapper[5048]: I1213 06:48:25.438332 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-cb17-account-create-update-rhz4v" event={"ID":"d5ec0a88-0ac6-437e-8aa2-293ac501fadc","Type":"ContainerDied","Data":"2f9aeda042cb1889d878094a98cbcea9dac862690670083b09197057618a41ee"} Dec 13 
06:48:25 crc kubenswrapper[5048]: I1213 06:48:25.438723 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-cb17-account-create-update-rhz4v" event={"ID":"d5ec0a88-0ac6-437e-8aa2-293ac501fadc","Type":"ContainerStarted","Data":"eff149617292afc51739793bb4b3dc8cc340e2b15ecc2b856a6ba3de9f0c8fde"} Dec 13 06:48:25 crc kubenswrapper[5048]: I1213 06:48:25.440038 5048 generic.go:334] "Generic (PLEG): container finished" podID="9b80676d-e344-4949-8a27-22381100d354" containerID="e0387975ec5e573db9d08ec659aef4ce71262e1ea721023bac3f0647ffebce10" exitCode=0 Dec 13 06:48:25 crc kubenswrapper[5048]: I1213 06:48:25.440068 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-nztt9" event={"ID":"9b80676d-e344-4949-8a27-22381100d354","Type":"ContainerDied","Data":"e0387975ec5e573db9d08ec659aef4ce71262e1ea721023bac3f0647ffebce10"} Dec 13 06:48:25 crc kubenswrapper[5048]: I1213 06:48:25.442783 5048 generic.go:334] "Generic (PLEG): container finished" podID="19c49aa3-f62c-4c78-a8b7-a6858f7da04e" containerID="b28c482610ca4bbf122d087c8b8a4e5294f8e089defe8a3a4a0edbc7f099c121" exitCode=0 Dec 13 06:48:25 crc kubenswrapper[5048]: I1213 06:48:25.442828 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-bfzrz" event={"ID":"19c49aa3-f62c-4c78-a8b7-a6858f7da04e","Type":"ContainerDied","Data":"b28c482610ca4bbf122d087c8b8a4e5294f8e089defe8a3a4a0edbc7f099c121"} Dec 13 06:48:26 crc kubenswrapper[5048]: I1213 06:48:26.820390 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-dac3-account-create-update-nrtk8" Dec 13 06:48:26 crc kubenswrapper[5048]: I1213 06:48:26.920193 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d790a6f6-a1d9-4f9c-84ae-de2358b07262-operator-scripts\") pod \"d790a6f6-a1d9-4f9c-84ae-de2358b07262\" (UID: \"d790a6f6-a1d9-4f9c-84ae-de2358b07262\") " Dec 13 06:48:26 crc kubenswrapper[5048]: I1213 06:48:26.920285 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hhxjb\" (UniqueName: \"kubernetes.io/projected/d790a6f6-a1d9-4f9c-84ae-de2358b07262-kube-api-access-hhxjb\") pod \"d790a6f6-a1d9-4f9c-84ae-de2358b07262\" (UID: \"d790a6f6-a1d9-4f9c-84ae-de2358b07262\") " Dec 13 06:48:26 crc kubenswrapper[5048]: I1213 06:48:26.921155 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d790a6f6-a1d9-4f9c-84ae-de2358b07262-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d790a6f6-a1d9-4f9c-84ae-de2358b07262" (UID: "d790a6f6-a1d9-4f9c-84ae-de2358b07262"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:48:26 crc kubenswrapper[5048]: I1213 06:48:26.927613 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d790a6f6-a1d9-4f9c-84ae-de2358b07262-kube-api-access-hhxjb" (OuterVolumeSpecName: "kube-api-access-hhxjb") pod "d790a6f6-a1d9-4f9c-84ae-de2358b07262" (UID: "d790a6f6-a1d9-4f9c-84ae-de2358b07262"). InnerVolumeSpecName "kube-api-access-hhxjb". 
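
Note: the CreateContainerError logged at 06:48:20 for dnsmasq-dns-8554648995-dwpg4 fails while bind-mounting a source under /var/lib/kubelet/pods/<podUID>/volume-subpaths/...: that directory is the host-side staging area the kubelet prepares for each subPath volume mount, and the trailing "1" appears to be the index of the failing dns-svc entry in the container's VolumeMounts (config is index 0 in the spec dumped above). "No such file or directory" on that source is consistent with the staging directory going away between volume setup and container creation, e.g. after the backing ConfigMap volume was rebuilt. A minimal sketch of that path layout, reconstructed from the logged path itself and not from kubelet sources; the helper name is ours:

    // subpath_path.go -- illustrative only; the directory layout is read off
    // the failing mount source in the log above.
    package main

    import (
        "fmt"
        "path/filepath"
    )

    // subPathStagingDir rebuilds pods/<uid>/volume-subpaths/<volume>/<container>/<idx>.
    func subPathStagingDir(root, podUID, volume, container string, idx int) string {
        return filepath.Join(root, "pods", podUID,
            "volume-subpaths", volume, container, fmt.Sprint(idx))
    }

    func main() {
        // Values from the 06:48:20 error: dns-svc is VolumeMounts[1] of dnsmasq-dns.
        fmt.Println(subPathStagingDir("/var/lib/kubelet",
            "4e4bfc5f-4bb9-4cd0-8686-854d60a380a4", "dns-svc", "dnsmasq-dns", 1))
    }
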
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.010484 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-b8fbc5445-j7782" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.022350 5048 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d790a6f6-a1d9-4f9c-84ae-de2358b07262-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.022392 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hhxjb\" (UniqueName: \"kubernetes.io/projected/d790a6f6-a1d9-4f9c-84ae-de2358b07262-kube-api-access-hhxjb\") on node \"crc\" DevicePath \"\"" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.024477 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-cb17-account-create-update-rhz4v" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.037745 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-54a6-account-create-update-fvkg9" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.051727 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-nztt9" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.053062 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-bfzrz" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.071630 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-rqc56" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.085287 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-dwpg4"] Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.085533 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8554648995-dwpg4" podUID="4e4bfc5f-4bb9-4cd0-8686-854d60a380a4" containerName="dnsmasq-dns" containerID="cri-o://e030a9bce163962e4c0b52716c274fbbd5131790ef1734d5b652f81e375db45f" gracePeriod=10 Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.087650 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-8554648995-dwpg4" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.126878 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rp57g\" (UniqueName: \"kubernetes.io/projected/d5ec0a88-0ac6-437e-8aa2-293ac501fadc-kube-api-access-rp57g\") pod \"d5ec0a88-0ac6-437e-8aa2-293ac501fadc\" (UID: \"d5ec0a88-0ac6-437e-8aa2-293ac501fadc\") " Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.126960 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a731241e-7fd1-45cb-9204-e71ff2a62a91-operator-scripts\") pod \"a731241e-7fd1-45cb-9204-e71ff2a62a91\" (UID: \"a731241e-7fd1-45cb-9204-e71ff2a62a91\") " Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.127120 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-df8m2\" (UniqueName: \"kubernetes.io/projected/a731241e-7fd1-45cb-9204-e71ff2a62a91-kube-api-access-df8m2\") pod \"a731241e-7fd1-45cb-9204-e71ff2a62a91\" (UID: \"a731241e-7fd1-45cb-9204-e71ff2a62a91\") " Dec 
13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.127136 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d5ec0a88-0ac6-437e-8aa2-293ac501fadc-operator-scripts\") pod \"d5ec0a88-0ac6-437e-8aa2-293ac501fadc\" (UID: \"d5ec0a88-0ac6-437e-8aa2-293ac501fadc\") " Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.129097 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a731241e-7fd1-45cb-9204-e71ff2a62a91-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a731241e-7fd1-45cb-9204-e71ff2a62a91" (UID: "a731241e-7fd1-45cb-9204-e71ff2a62a91"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.129540 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d5ec0a88-0ac6-437e-8aa2-293ac501fadc-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d5ec0a88-0ac6-437e-8aa2-293ac501fadc" (UID: "d5ec0a88-0ac6-437e-8aa2-293ac501fadc"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.132267 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a731241e-7fd1-45cb-9204-e71ff2a62a91-kube-api-access-df8m2" (OuterVolumeSpecName: "kube-api-access-df8m2") pod "a731241e-7fd1-45cb-9204-e71ff2a62a91" (UID: "a731241e-7fd1-45cb-9204-e71ff2a62a91"). InnerVolumeSpecName "kube-api-access-df8m2". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.132309 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d5ec0a88-0ac6-437e-8aa2-293ac501fadc-kube-api-access-rp57g" (OuterVolumeSpecName: "kube-api-access-rp57g") pod "d5ec0a88-0ac6-437e-8aa2-293ac501fadc" (UID: "d5ec0a88-0ac6-437e-8aa2-293ac501fadc"). InnerVolumeSpecName "kube-api-access-rp57g". 
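
Note: the etc-swift mount for swift-storage-0 fails twice in this excerpt because the projected volume sources the swift-ring-files ConfigMap, which does not exist yet (it is presumably published later by the swift-ring-rebalance job whose pod starts at 06:48:17). nestedpendingoperations doubles the delay between attempts: durationBeforeRetry is 4s at 06:48:20 and 8s at 06:48:24. A minimal sketch of that doubling-with-cap pattern; the starting value is just the first delay visible here, and the cap is an assumption, not kubelet's actual constant:

    // backoff.go -- illustrative doubling backoff matching the 4s -> 8s
    // progression in the nestedpendingoperations entries above.
    package main

    import (
        "fmt"
        "time"
    )

    func nextDelay(prev, limit time.Duration) time.Duration {
        if next := prev * 2; next < limit {
            return next
        }
        return limit
    }

    func main() {
        d := 4 * time.Second // first durationBeforeRetry seen in the log
        for attempt := 1; attempt <= 4; attempt++ {
            fmt.Printf("attempt %d: no retries permitted for %s\n", attempt, d)
            d = nextDelay(d, 2*time.Minute) // cap chosen for illustration
        }
    }
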
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.227982 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b80676d-e344-4949-8a27-22381100d354-operator-scripts\") pod \"9b80676d-e344-4949-8a27-22381100d354\" (UID: \"9b80676d-e344-4949-8a27-22381100d354\") " Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.228073 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tj58w\" (UniqueName: \"kubernetes.io/projected/19c49aa3-f62c-4c78-a8b7-a6858f7da04e-kube-api-access-tj58w\") pod \"19c49aa3-f62c-4c78-a8b7-a6858f7da04e\" (UID: \"19c49aa3-f62c-4c78-a8b7-a6858f7da04e\") " Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.228220 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c65wg\" (UniqueName: \"kubernetes.io/projected/a1efce17-5954-4d64-b35e-8ac14d24e7b0-kube-api-access-c65wg\") pod \"a1efce17-5954-4d64-b35e-8ac14d24e7b0\" (UID: \"a1efce17-5954-4d64-b35e-8ac14d24e7b0\") " Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.228271 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/19c49aa3-f62c-4c78-a8b7-a6858f7da04e-operator-scripts\") pod \"19c49aa3-f62c-4c78-a8b7-a6858f7da04e\" (UID: \"19c49aa3-f62c-4c78-a8b7-a6858f7da04e\") " Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.228294 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a1efce17-5954-4d64-b35e-8ac14d24e7b0-operator-scripts\") pod \"a1efce17-5954-4d64-b35e-8ac14d24e7b0\" (UID: \"a1efce17-5954-4d64-b35e-8ac14d24e7b0\") " Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.228314 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zw5x4\" (UniqueName: \"kubernetes.io/projected/9b80676d-e344-4949-8a27-22381100d354-kube-api-access-zw5x4\") pod \"9b80676d-e344-4949-8a27-22381100d354\" (UID: \"9b80676d-e344-4949-8a27-22381100d354\") " Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.228649 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-df8m2\" (UniqueName: \"kubernetes.io/projected/a731241e-7fd1-45cb-9204-e71ff2a62a91-kube-api-access-df8m2\") on node \"crc\" DevicePath \"\"" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.228670 5048 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d5ec0a88-0ac6-437e-8aa2-293ac501fadc-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.228680 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rp57g\" (UniqueName: \"kubernetes.io/projected/d5ec0a88-0ac6-437e-8aa2-293ac501fadc-kube-api-access-rp57g\") on node \"crc\" DevicePath \"\"" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.228689 5048 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a731241e-7fd1-45cb-9204-e71ff2a62a91-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.232454 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/a1efce17-5954-4d64-b35e-8ac14d24e7b0-kube-api-access-c65wg" (OuterVolumeSpecName: "kube-api-access-c65wg") pod "a1efce17-5954-4d64-b35e-8ac14d24e7b0" (UID: "a1efce17-5954-4d64-b35e-8ac14d24e7b0"). InnerVolumeSpecName "kube-api-access-c65wg". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.232508 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a1efce17-5954-4d64-b35e-8ac14d24e7b0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a1efce17-5954-4d64-b35e-8ac14d24e7b0" (UID: "a1efce17-5954-4d64-b35e-8ac14d24e7b0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.232477 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b80676d-e344-4949-8a27-22381100d354-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9b80676d-e344-4949-8a27-22381100d354" (UID: "9b80676d-e344-4949-8a27-22381100d354"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.232489 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b80676d-e344-4949-8a27-22381100d354-kube-api-access-zw5x4" (OuterVolumeSpecName: "kube-api-access-zw5x4") pod "9b80676d-e344-4949-8a27-22381100d354" (UID: "9b80676d-e344-4949-8a27-22381100d354"). InnerVolumeSpecName "kube-api-access-zw5x4". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.233075 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/19c49aa3-f62c-4c78-a8b7-a6858f7da04e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "19c49aa3-f62c-4c78-a8b7-a6858f7da04e" (UID: "19c49aa3-f62c-4c78-a8b7-a6858f7da04e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.235321 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19c49aa3-f62c-4c78-a8b7-a6858f7da04e-kube-api-access-tj58w" (OuterVolumeSpecName: "kube-api-access-tj58w") pod "19c49aa3-f62c-4c78-a8b7-a6858f7da04e" (UID: "19c49aa3-f62c-4c78-a8b7-a6858f7da04e"). InnerVolumeSpecName "kube-api-access-tj58w". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.330080 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c65wg\" (UniqueName: \"kubernetes.io/projected/a1efce17-5954-4d64-b35e-8ac14d24e7b0-kube-api-access-c65wg\") on node \"crc\" DevicePath \"\"" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.330130 5048 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/19c49aa3-f62c-4c78-a8b7-a6858f7da04e-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.330140 5048 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a1efce17-5954-4d64-b35e-8ac14d24e7b0-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.330149 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zw5x4\" (UniqueName: \"kubernetes.io/projected/9b80676d-e344-4949-8a27-22381100d354-kube-api-access-zw5x4\") on node \"crc\" DevicePath \"\"" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.330158 5048 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b80676d-e344-4949-8a27-22381100d354-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.330166 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tj58w\" (UniqueName: \"kubernetes.io/projected/19c49aa3-f62c-4c78-a8b7-a6858f7da04e-kube-api-access-tj58w\") on node \"crc\" DevicePath \"\"" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.459623 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-bfzrz" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.459637 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-bfzrz" event={"ID":"19c49aa3-f62c-4c78-a8b7-a6858f7da04e","Type":"ContainerDied","Data":"81f422e7da63c58bdb4a2b34cc38c9cee4aac606794c392eb57eb01eaa07024f"} Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.459691 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="81f422e7da63c58bdb4a2b34cc38c9cee4aac606794c392eb57eb01eaa07024f" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.461179 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-dac3-account-create-update-nrtk8" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.461217 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-dac3-account-create-update-nrtk8" event={"ID":"d790a6f6-a1d9-4f9c-84ae-de2358b07262","Type":"ContainerDied","Data":"8b61d397dd47274d06c1c416ac856a5e204c4c9bf6831fa001fb033b0e0830ce"} Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.461240 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8b61d397dd47274d06c1c416ac856a5e204c4c9bf6831fa001fb033b0e0830ce" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.462625 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-rqc56" event={"ID":"a1efce17-5954-4d64-b35e-8ac14d24e7b0","Type":"ContainerDied","Data":"8550d0bf7e3fc689685fe596c17404e8a58057471a3c8d21349bc1e525a976c4"} Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.462661 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8550d0bf7e3fc689685fe596c17404e8a58057471a3c8d21349bc1e525a976c4" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.462718 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-rqc56" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.477957 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-54a6-account-create-update-fvkg9" event={"ID":"a731241e-7fd1-45cb-9204-e71ff2a62a91","Type":"ContainerDied","Data":"7385ecf7df53531fec2f83780cef606cb79d60ba219e23626379f1caa6ed76fd"} Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.478013 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7385ecf7df53531fec2f83780cef606cb79d60ba219e23626379f1caa6ed76fd" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.478054 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-54a6-account-create-update-fvkg9" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.481205 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-cb17-account-create-update-rhz4v" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.481315 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-cb17-account-create-update-rhz4v" event={"ID":"d5ec0a88-0ac6-437e-8aa2-293ac501fadc","Type":"ContainerDied","Data":"eff149617292afc51739793bb4b3dc8cc340e2b15ecc2b856a6ba3de9f0c8fde"} Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.481363 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eff149617292afc51739793bb4b3dc8cc340e2b15ecc2b856a6ba3de9f0c8fde" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.482741 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-nztt9" event={"ID":"9b80676d-e344-4949-8a27-22381100d354","Type":"ContainerDied","Data":"6929f60e7da791e4b89cd42b4641daa980ff69dd66b975ad5c05dd1e62cea38e"} Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.482774 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6929f60e7da791e4b89cd42b4641daa980ff69dd66b975ad5c05dd1e62cea38e" Dec 13 06:48:27 crc kubenswrapper[5048]: I1213 06:48:27.482876 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-nztt9" Dec 13 06:48:28 crc kubenswrapper[5048]: I1213 06:48:28.491829 5048 generic.go:334] "Generic (PLEG): container finished" podID="4e4bfc5f-4bb9-4cd0-8686-854d60a380a4" containerID="e030a9bce163962e4c0b52716c274fbbd5131790ef1734d5b652f81e375db45f" exitCode=0 Dec 13 06:48:28 crc kubenswrapper[5048]: I1213 06:48:28.491901 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-dwpg4" event={"ID":"4e4bfc5f-4bb9-4cd0-8686-854d60a380a4","Type":"ContainerDied","Data":"e030a9bce163962e4c0b52716c274fbbd5131790ef1734d5b652f81e375db45f"} Dec 13 06:48:28 crc kubenswrapper[5048]: I1213 06:48:28.859808 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-dwpg4" Dec 13 06:48:28 crc kubenswrapper[5048]: I1213 06:48:28.978220 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4e4bfc5f-4bb9-4cd0-8686-854d60a380a4-ovsdbserver-nb\") pod \"4e4bfc5f-4bb9-4cd0-8686-854d60a380a4\" (UID: \"4e4bfc5f-4bb9-4cd0-8686-854d60a380a4\") " Dec 13 06:48:28 crc kubenswrapper[5048]: I1213 06:48:28.978332 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rpzt6\" (UniqueName: \"kubernetes.io/projected/4e4bfc5f-4bb9-4cd0-8686-854d60a380a4-kube-api-access-rpzt6\") pod \"4e4bfc5f-4bb9-4cd0-8686-854d60a380a4\" (UID: \"4e4bfc5f-4bb9-4cd0-8686-854d60a380a4\") " Dec 13 06:48:28 crc kubenswrapper[5048]: I1213 06:48:28.978394 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4e4bfc5f-4bb9-4cd0-8686-854d60a380a4-dns-svc\") pod \"4e4bfc5f-4bb9-4cd0-8686-854d60a380a4\" (UID: \"4e4bfc5f-4bb9-4cd0-8686-854d60a380a4\") " Dec 13 06:48:28 crc kubenswrapper[5048]: I1213 06:48:28.979930 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4e4bfc5f-4bb9-4cd0-8686-854d60a380a4-ovsdbserver-sb\") pod \"4e4bfc5f-4bb9-4cd0-8686-854d60a380a4\" (UID: \"4e4bfc5f-4bb9-4cd0-8686-854d60a380a4\") " Dec 13 06:48:28 crc kubenswrapper[5048]: I1213 06:48:28.980013 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e4bfc5f-4bb9-4cd0-8686-854d60a380a4-config\") pod \"4e4bfc5f-4bb9-4cd0-8686-854d60a380a4\" (UID: \"4e4bfc5f-4bb9-4cd0-8686-854d60a380a4\") " Dec 13 06:48:28 crc kubenswrapper[5048]: I1213 06:48:28.990661 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e4bfc5f-4bb9-4cd0-8686-854d60a380a4-kube-api-access-rpzt6" (OuterVolumeSpecName: "kube-api-access-rpzt6") pod "4e4bfc5f-4bb9-4cd0-8686-854d60a380a4" (UID: "4e4bfc5f-4bb9-4cd0-8686-854d60a380a4"). InnerVolumeSpecName "kube-api-access-rpzt6". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.035851 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e4bfc5f-4bb9-4cd0-8686-854d60a380a4-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "4e4bfc5f-4bb9-4cd0-8686-854d60a380a4" (UID: "4e4bfc5f-4bb9-4cd0-8686-854d60a380a4"). InnerVolumeSpecName "ovsdbserver-sb". 
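
Note: the readiness flips recorded by "SyncLoop (probe)" for the dnsmasq pods are driven by the TCPSocket probe on port 5353 declared in the container spec at the top of this excerpt (InitialDelaySeconds 5, TimeoutSeconds 5, PeriodSeconds 5 for readiness). Such a probe reduces to a timed TCP connect; a self-contained sketch with an illustrative address (the kubelet would dial the pod IP from status.podIP, cf. the POD_IP env var in the same spec):

    // tcpprobe.go -- what a TCPSocket probe amounts to: does a TCP connect
    // to the target complete within the timeout?
    package main

    import (
        "fmt"
        "net"
        "time"
    )

    func tcpProbe(addr string, timeout time.Duration) bool {
        conn, err := net.DialTimeout("tcp", addr, timeout)
        if err != nil {
            return false
        }
        conn.Close()
        return true
    }

    func main() {
        fmt.Println("ready:", tcpProbe("127.0.0.1:5353", 5*time.Second)) // illustrative address
    }
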
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.042780 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e4bfc5f-4bb9-4cd0-8686-854d60a380a4-config" (OuterVolumeSpecName: "config") pod "4e4bfc5f-4bb9-4cd0-8686-854d60a380a4" (UID: "4e4bfc5f-4bb9-4cd0-8686-854d60a380a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.042794 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e4bfc5f-4bb9-4cd0-8686-854d60a380a4-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "4e4bfc5f-4bb9-4cd0-8686-854d60a380a4" (UID: "4e4bfc5f-4bb9-4cd0-8686-854d60a380a4"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.044926 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e4bfc5f-4bb9-4cd0-8686-854d60a380a4-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4e4bfc5f-4bb9-4cd0-8686-854d60a380a4" (UID: "4e4bfc5f-4bb9-4cd0-8686-854d60a380a4"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.081701 5048 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4e4bfc5f-4bb9-4cd0-8686-854d60a380a4-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.081746 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e4bfc5f-4bb9-4cd0-8686-854d60a380a4-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.081758 5048 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4e4bfc5f-4bb9-4cd0-8686-854d60a380a4-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.081820 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rpzt6\" (UniqueName: \"kubernetes.io/projected/4e4bfc5f-4bb9-4cd0-8686-854d60a380a4-kube-api-access-rpzt6\") on node \"crc\" DevicePath \"\"" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.081842 5048 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4e4bfc5f-4bb9-4cd0-8686-854d60a380a4-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.095425 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-hgfjd"] Dec 13 06:48:29 crc kubenswrapper[5048]: E1213 06:48:29.097466 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e4bfc5f-4bb9-4cd0-8686-854d60a380a4" containerName="init" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.097488 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e4bfc5f-4bb9-4cd0-8686-854d60a380a4" containerName="init" Dec 13 06:48:29 crc kubenswrapper[5048]: E1213 06:48:29.097505 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a731241e-7fd1-45cb-9204-e71ff2a62a91" containerName="mariadb-account-create-update" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.097514 5048 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="a731241e-7fd1-45cb-9204-e71ff2a62a91" containerName="mariadb-account-create-update" Dec 13 06:48:29 crc kubenswrapper[5048]: E1213 06:48:29.097529 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19c49aa3-f62c-4c78-a8b7-a6858f7da04e" containerName="mariadb-database-create" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.097537 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="19c49aa3-f62c-4c78-a8b7-a6858f7da04e" containerName="mariadb-database-create" Dec 13 06:48:29 crc kubenswrapper[5048]: E1213 06:48:29.097554 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1efce17-5954-4d64-b35e-8ac14d24e7b0" containerName="mariadb-database-create" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.097562 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1efce17-5954-4d64-b35e-8ac14d24e7b0" containerName="mariadb-database-create" Dec 13 06:48:29 crc kubenswrapper[5048]: E1213 06:48:29.097575 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e4bfc5f-4bb9-4cd0-8686-854d60a380a4" containerName="dnsmasq-dns" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.097582 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e4bfc5f-4bb9-4cd0-8686-854d60a380a4" containerName="dnsmasq-dns" Dec 13 06:48:29 crc kubenswrapper[5048]: E1213 06:48:29.097592 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d790a6f6-a1d9-4f9c-84ae-de2358b07262" containerName="mariadb-account-create-update" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.097600 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="d790a6f6-a1d9-4f9c-84ae-de2358b07262" containerName="mariadb-account-create-update" Dec 13 06:48:29 crc kubenswrapper[5048]: E1213 06:48:29.097610 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b80676d-e344-4949-8a27-22381100d354" containerName="mariadb-database-create" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.097618 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b80676d-e344-4949-8a27-22381100d354" containerName="mariadb-database-create" Dec 13 06:48:29 crc kubenswrapper[5048]: E1213 06:48:29.097629 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5ec0a88-0ac6-437e-8aa2-293ac501fadc" containerName="mariadb-account-create-update" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.097637 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5ec0a88-0ac6-437e-8aa2-293ac501fadc" containerName="mariadb-account-create-update" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.097889 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="d790a6f6-a1d9-4f9c-84ae-de2358b07262" containerName="mariadb-account-create-update" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.097910 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="a731241e-7fd1-45cb-9204-e71ff2a62a91" containerName="mariadb-account-create-update" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.097922 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e4bfc5f-4bb9-4cd0-8686-854d60a380a4" containerName="dnsmasq-dns" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.097935 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="a1efce17-5954-4d64-b35e-8ac14d24e7b0" containerName="mariadb-database-create" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.097943 5048 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="9b80676d-e344-4949-8a27-22381100d354" containerName="mariadb-database-create" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.097957 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5ec0a88-0ac6-437e-8aa2-293ac501fadc" containerName="mariadb-account-create-update" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.097969 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="19c49aa3-f62c-4c78-a8b7-a6858f7da04e" containerName="mariadb-database-create" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.098597 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-hgfjd" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.105135 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-9stwb" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.105484 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.107448 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-hgfjd"] Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.183883 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/91aa9cb1-7cd0-4cd3-9918-460b5d976ab7-db-sync-config-data\") pod \"glance-db-sync-hgfjd\" (UID: \"91aa9cb1-7cd0-4cd3-9918-460b5d976ab7\") " pod="openstack/glance-db-sync-hgfjd" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.183952 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91aa9cb1-7cd0-4cd3-9918-460b5d976ab7-config-data\") pod \"glance-db-sync-hgfjd\" (UID: \"91aa9cb1-7cd0-4cd3-9918-460b5d976ab7\") " pod="openstack/glance-db-sync-hgfjd" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.183975 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91aa9cb1-7cd0-4cd3-9918-460b5d976ab7-combined-ca-bundle\") pod \"glance-db-sync-hgfjd\" (UID: \"91aa9cb1-7cd0-4cd3-9918-460b5d976ab7\") " pod="openstack/glance-db-sync-hgfjd" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.184026 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9gjbh\" (UniqueName: \"kubernetes.io/projected/91aa9cb1-7cd0-4cd3-9918-460b5d976ab7-kube-api-access-9gjbh\") pod \"glance-db-sync-hgfjd\" (UID: \"91aa9cb1-7cd0-4cd3-9918-460b5d976ab7\") " pod="openstack/glance-db-sync-hgfjd" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.285125 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/91aa9cb1-7cd0-4cd3-9918-460b5d976ab7-db-sync-config-data\") pod \"glance-db-sync-hgfjd\" (UID: \"91aa9cb1-7cd0-4cd3-9918-460b5d976ab7\") " pod="openstack/glance-db-sync-hgfjd" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.285213 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91aa9cb1-7cd0-4cd3-9918-460b5d976ab7-config-data\") pod \"glance-db-sync-hgfjd\" (UID: \"91aa9cb1-7cd0-4cd3-9918-460b5d976ab7\") " pod="openstack/glance-db-sync-hgfjd" Dec 13 06:48:29 crc kubenswrapper[5048]: 
I1213 06:48:29.285240 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91aa9cb1-7cd0-4cd3-9918-460b5d976ab7-combined-ca-bundle\") pod \"glance-db-sync-hgfjd\" (UID: \"91aa9cb1-7cd0-4cd3-9918-460b5d976ab7\") " pod="openstack/glance-db-sync-hgfjd" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.285301 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9gjbh\" (UniqueName: \"kubernetes.io/projected/91aa9cb1-7cd0-4cd3-9918-460b5d976ab7-kube-api-access-9gjbh\") pod \"glance-db-sync-hgfjd\" (UID: \"91aa9cb1-7cd0-4cd3-9918-460b5d976ab7\") " pod="openstack/glance-db-sync-hgfjd" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.288991 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91aa9cb1-7cd0-4cd3-9918-460b5d976ab7-combined-ca-bundle\") pod \"glance-db-sync-hgfjd\" (UID: \"91aa9cb1-7cd0-4cd3-9918-460b5d976ab7\") " pod="openstack/glance-db-sync-hgfjd" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.289059 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91aa9cb1-7cd0-4cd3-9918-460b5d976ab7-config-data\") pod \"glance-db-sync-hgfjd\" (UID: \"91aa9cb1-7cd0-4cd3-9918-460b5d976ab7\") " pod="openstack/glance-db-sync-hgfjd" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.289321 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/91aa9cb1-7cd0-4cd3-9918-460b5d976ab7-db-sync-config-data\") pod \"glance-db-sync-hgfjd\" (UID: \"91aa9cb1-7cd0-4cd3-9918-460b5d976ab7\") " pod="openstack/glance-db-sync-hgfjd" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.302160 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9gjbh\" (UniqueName: \"kubernetes.io/projected/91aa9cb1-7cd0-4cd3-9918-460b5d976ab7-kube-api-access-9gjbh\") pod \"glance-db-sync-hgfjd\" (UID: \"91aa9cb1-7cd0-4cd3-9918-460b5d976ab7\") " pod="openstack/glance-db-sync-hgfjd" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.421511 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-hgfjd" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.513113 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-dwpg4" event={"ID":"4e4bfc5f-4bb9-4cd0-8686-854d60a380a4","Type":"ContainerDied","Data":"0bfca8647b79d07ebb81578a9afe71a866ca78f61eccf72a93c66e66c1c0b594"} Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.513188 5048 util.go:48] "No ready sandbox for pod can be found. 
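
Note: the glance-db-sync-hgfjd lines above show kubelet's phased volume handling for the pod's secret and projected volumes: VerifyControllerAttachedVolume, then MountVolume started, then MountVolume.SetUp succeeded, one pass per volume. A minimal Go sketch of that sequence follows; the types and function names are invented for illustration and are not kubelet's actual operationexecutor API.

    // Sketch of the verify -> setup flow logged above for
    // glance-db-sync-hgfjd. Illustrative only: verifyAttached and
    // setUp are invented names, not kubelet functions.
    package main

    import "fmt"

    type volume struct{ name, plugin string }

    func verifyAttached(v volume) error {
        // Secret/configmap/projected volumes are node-local, so the
        // "attach" check is trivially satisfied; the log records it anyway.
        return nil
    }

    func setUp(v volume) error {
        // The real kubelet materializes the payload under
        // /var/lib/kubelet/pods/<UID>/volumes/.
        fmt.Printf("MountVolume.SetUp succeeded for %q (%s)\n", v.name, v.plugin)
        return nil
    }

    func main() {
        vols := []volume{
            {"db-sync-config-data", "kubernetes.io/secret"},
            {"config-data", "kubernetes.io/secret"},
            {"combined-ca-bundle", "kubernetes.io/secret"},
            {"kube-api-access-9gjbh", "kubernetes.io/projected"},
        }
        for _, v := range vols {
            if err := verifyAttached(v); err != nil {
                continue
            }
            _ = setUp(v)
        }
    }
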
Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-dwpg4" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.513365 5048 scope.go:117] "RemoveContainer" containerID="e030a9bce163962e4c0b52716c274fbbd5131790ef1734d5b652f81e375db45f" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.562201 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-dwpg4"] Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.566904 5048 scope.go:117] "RemoveContainer" containerID="67d09d55b28e40d3532eb26640c5e0b97c5e9f0fdf2082c143a1ba2ba22218ee" Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.592109 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8554648995-dwpg4"] Dec 13 06:48:29 crc kubenswrapper[5048]: I1213 06:48:29.997010 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-hgfjd"] Dec 13 06:48:30 crc kubenswrapper[5048]: I1213 06:48:30.521427 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-hgfjd" event={"ID":"91aa9cb1-7cd0-4cd3-9918-460b5d976ab7","Type":"ContainerStarted","Data":"9d2e30435ff821a510b6e99fb8d4c8fa90bef6274ef2c9c9277762fdfc69ae39"} Dec 13 06:48:30 crc kubenswrapper[5048]: I1213 06:48:30.576064 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4e4bfc5f-4bb9-4cd0-8686-854d60a380a4" path="/var/lib/kubelet/pods/4e4bfc5f-4bb9-4cd0-8686-854d60a380a4/volumes" Dec 13 06:48:32 crc kubenswrapper[5048]: I1213 06:48:32.881811 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4-etc-swift\") pod \"swift-storage-0\" (UID: \"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4\") " pod="openstack/swift-storage-0" Dec 13 06:48:32 crc kubenswrapper[5048]: E1213 06:48:32.882016 5048 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 13 06:48:32 crc kubenswrapper[5048]: E1213 06:48:32.882174 5048 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 13 06:48:32 crc kubenswrapper[5048]: E1213 06:48:32.882238 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4-etc-swift podName:b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4 nodeName:}" failed. No retries permitted until 2025-12-13 06:48:48.882221051 +0000 UTC m=+1162.748815632 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4-etc-swift") pod "swift-storage-0" (UID: "b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4") : configmap "swift-ring-files" not found Dec 13 06:48:34 crc kubenswrapper[5048]: I1213 06:48:34.333340 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Dec 13 06:48:36 crc kubenswrapper[5048]: I1213 06:48:36.592933 5048 generic.go:334] "Generic (PLEG): container finished" podID="76f71d51-d887-428e-bcf0-e07a75cda134" containerID="e20c99d9f28d3fe7aeb127ebec094b91b05ef295539a0ac60516d368f72fbab9" exitCode=0 Dec 13 06:48:36 crc kubenswrapper[5048]: I1213 06:48:36.593020 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-xs94r" event={"ID":"76f71d51-d887-428e-bcf0-e07a75cda134","Type":"ContainerDied","Data":"e20c99d9f28d3fe7aeb127ebec094b91b05ef295539a0ac60516d368f72fbab9"} Dec 13 06:48:37 crc kubenswrapper[5048]: I1213 06:48:37.927203 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-xs94r" Dec 13 06:48:38 crc kubenswrapper[5048]: I1213 06:48:38.112883 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/76f71d51-d887-428e-bcf0-e07a75cda134-dispersionconf\") pod \"76f71d51-d887-428e-bcf0-e07a75cda134\" (UID: \"76f71d51-d887-428e-bcf0-e07a75cda134\") " Dec 13 06:48:38 crc kubenswrapper[5048]: I1213 06:48:38.113224 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/76f71d51-d887-428e-bcf0-e07a75cda134-etc-swift\") pod \"76f71d51-d887-428e-bcf0-e07a75cda134\" (UID: \"76f71d51-d887-428e-bcf0-e07a75cda134\") " Dec 13 06:48:38 crc kubenswrapper[5048]: I1213 06:48:38.113247 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/76f71d51-d887-428e-bcf0-e07a75cda134-ring-data-devices\") pod \"76f71d51-d887-428e-bcf0-e07a75cda134\" (UID: \"76f71d51-d887-428e-bcf0-e07a75cda134\") " Dec 13 06:48:38 crc kubenswrapper[5048]: I1213 06:48:38.113288 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/76f71d51-d887-428e-bcf0-e07a75cda134-scripts\") pod \"76f71d51-d887-428e-bcf0-e07a75cda134\" (UID: \"76f71d51-d887-428e-bcf0-e07a75cda134\") " Dec 13 06:48:38 crc kubenswrapper[5048]: I1213 06:48:38.113412 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76f71d51-d887-428e-bcf0-e07a75cda134-combined-ca-bundle\") pod \"76f71d51-d887-428e-bcf0-e07a75cda134\" (UID: \"76f71d51-d887-428e-bcf0-e07a75cda134\") " Dec 13 06:48:38 crc kubenswrapper[5048]: I1213 06:48:38.113472 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/76f71d51-d887-428e-bcf0-e07a75cda134-swiftconf\") pod \"76f71d51-d887-428e-bcf0-e07a75cda134\" (UID: \"76f71d51-d887-428e-bcf0-e07a75cda134\") " Dec 13 06:48:38 crc kubenswrapper[5048]: I1213 06:48:38.113523 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k8wnk\" (UniqueName: \"kubernetes.io/projected/76f71d51-d887-428e-bcf0-e07a75cda134-kube-api-access-k8wnk\") pod 
\"76f71d51-d887-428e-bcf0-e07a75cda134\" (UID: \"76f71d51-d887-428e-bcf0-e07a75cda134\") " Dec 13 06:48:38 crc kubenswrapper[5048]: I1213 06:48:38.113937 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/76f71d51-d887-428e-bcf0-e07a75cda134-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "76f71d51-d887-428e-bcf0-e07a75cda134" (UID: "76f71d51-d887-428e-bcf0-e07a75cda134"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:48:38 crc kubenswrapper[5048]: I1213 06:48:38.114244 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/76f71d51-d887-428e-bcf0-e07a75cda134-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "76f71d51-d887-428e-bcf0-e07a75cda134" (UID: "76f71d51-d887-428e-bcf0-e07a75cda134"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:48:38 crc kubenswrapper[5048]: I1213 06:48:38.118555 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/76f71d51-d887-428e-bcf0-e07a75cda134-kube-api-access-k8wnk" (OuterVolumeSpecName: "kube-api-access-k8wnk") pod "76f71d51-d887-428e-bcf0-e07a75cda134" (UID: "76f71d51-d887-428e-bcf0-e07a75cda134"). InnerVolumeSpecName "kube-api-access-k8wnk". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:48:38 crc kubenswrapper[5048]: I1213 06:48:38.121333 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/76f71d51-d887-428e-bcf0-e07a75cda134-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "76f71d51-d887-428e-bcf0-e07a75cda134" (UID: "76f71d51-d887-428e-bcf0-e07a75cda134"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:48:38 crc kubenswrapper[5048]: I1213 06:48:38.135427 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/76f71d51-d887-428e-bcf0-e07a75cda134-scripts" (OuterVolumeSpecName: "scripts") pod "76f71d51-d887-428e-bcf0-e07a75cda134" (UID: "76f71d51-d887-428e-bcf0-e07a75cda134"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:48:38 crc kubenswrapper[5048]: I1213 06:48:38.135544 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/76f71d51-d887-428e-bcf0-e07a75cda134-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "76f71d51-d887-428e-bcf0-e07a75cda134" (UID: "76f71d51-d887-428e-bcf0-e07a75cda134"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:48:38 crc kubenswrapper[5048]: I1213 06:48:38.137224 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/76f71d51-d887-428e-bcf0-e07a75cda134-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "76f71d51-d887-428e-bcf0-e07a75cda134" (UID: "76f71d51-d887-428e-bcf0-e07a75cda134"). InnerVolumeSpecName "swiftconf". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:48:38 crc kubenswrapper[5048]: I1213 06:48:38.214922 5048 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/76f71d51-d887-428e-bcf0-e07a75cda134-dispersionconf\") on node \"crc\" DevicePath \"\"" Dec 13 06:48:38 crc kubenswrapper[5048]: I1213 06:48:38.214948 5048 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/76f71d51-d887-428e-bcf0-e07a75cda134-etc-swift\") on node \"crc\" DevicePath \"\"" Dec 13 06:48:38 crc kubenswrapper[5048]: I1213 06:48:38.214957 5048 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/76f71d51-d887-428e-bcf0-e07a75cda134-ring-data-devices\") on node \"crc\" DevicePath \"\"" Dec 13 06:48:38 crc kubenswrapper[5048]: I1213 06:48:38.214968 5048 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/76f71d51-d887-428e-bcf0-e07a75cda134-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:48:38 crc kubenswrapper[5048]: I1213 06:48:38.214976 5048 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76f71d51-d887-428e-bcf0-e07a75cda134-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 06:48:38 crc kubenswrapper[5048]: I1213 06:48:38.214984 5048 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/76f71d51-d887-428e-bcf0-e07a75cda134-swiftconf\") on node \"crc\" DevicePath \"\"" Dec 13 06:48:38 crc kubenswrapper[5048]: I1213 06:48:38.214992 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k8wnk\" (UniqueName: \"kubernetes.io/projected/76f71d51-d887-428e-bcf0-e07a75cda134-kube-api-access-k8wnk\") on node \"crc\" DevicePath \"\"" Dec 13 06:48:38 crc kubenswrapper[5048]: I1213 06:48:38.610413 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-xs94r" event={"ID":"76f71d51-d887-428e-bcf0-e07a75cda134","Type":"ContainerDied","Data":"0cd3daa0dfda4436dfe10e57a779dcb364097126a38ed53a0334d25a4fd899d2"} Dec 13 06:48:38 crc kubenswrapper[5048]: I1213 06:48:38.610477 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0cd3daa0dfda4436dfe10e57a779dcb364097126a38ed53a0334d25a4fd899d2" Dec 13 06:48:38 crc kubenswrapper[5048]: I1213 06:48:38.610561 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-xs94r" Dec 13 06:48:38 crc kubenswrapper[5048]: I1213 06:48:38.858336 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-2sh28" podUID="a8258a39-dbbb-4672-9d88-22749f0c9563" containerName="ovn-controller" probeResult="failure" output=< Dec 13 06:48:38 crc kubenswrapper[5048]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Dec 13 06:48:38 crc kubenswrapper[5048]: > Dec 13 06:48:38 crc kubenswrapper[5048]: I1213 06:48:38.884224 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-x5dfv" Dec 13 06:48:38 crc kubenswrapper[5048]: I1213 06:48:38.894685 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-x5dfv" Dec 13 06:48:39 crc kubenswrapper[5048]: I1213 06:48:39.121935 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-2sh28-config-xm4j8"] Dec 13 06:48:39 crc kubenswrapper[5048]: E1213 06:48:39.122359 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76f71d51-d887-428e-bcf0-e07a75cda134" containerName="swift-ring-rebalance" Dec 13 06:48:39 crc kubenswrapper[5048]: I1213 06:48:39.122424 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="76f71d51-d887-428e-bcf0-e07a75cda134" containerName="swift-ring-rebalance" Dec 13 06:48:39 crc kubenswrapper[5048]: I1213 06:48:39.122725 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="76f71d51-d887-428e-bcf0-e07a75cda134" containerName="swift-ring-rebalance" Dec 13 06:48:39 crc kubenswrapper[5048]: I1213 06:48:39.123340 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-2sh28-config-xm4j8" Dec 13 06:48:39 crc kubenswrapper[5048]: I1213 06:48:39.126177 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Dec 13 06:48:39 crc kubenswrapper[5048]: I1213 06:48:39.138470 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-2sh28-config-xm4j8"] Dec 13 06:48:39 crc kubenswrapper[5048]: I1213 06:48:39.236212 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a43f00b0-1103-42d2-ae89-c3bf51d845b8-scripts\") pod \"ovn-controller-2sh28-config-xm4j8\" (UID: \"a43f00b0-1103-42d2-ae89-c3bf51d845b8\") " pod="openstack/ovn-controller-2sh28-config-xm4j8" Dec 13 06:48:39 crc kubenswrapper[5048]: I1213 06:48:39.236297 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/a43f00b0-1103-42d2-ae89-c3bf51d845b8-var-run\") pod \"ovn-controller-2sh28-config-xm4j8\" (UID: \"a43f00b0-1103-42d2-ae89-c3bf51d845b8\") " pod="openstack/ovn-controller-2sh28-config-xm4j8" Dec 13 06:48:39 crc kubenswrapper[5048]: I1213 06:48:39.236500 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bff8j\" (UniqueName: \"kubernetes.io/projected/a43f00b0-1103-42d2-ae89-c3bf51d845b8-kube-api-access-bff8j\") pod \"ovn-controller-2sh28-config-xm4j8\" (UID: \"a43f00b0-1103-42d2-ae89-c3bf51d845b8\") " pod="openstack/ovn-controller-2sh28-config-xm4j8" Dec 13 06:48:39 crc kubenswrapper[5048]: I1213 06:48:39.236599 5048 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/a43f00b0-1103-42d2-ae89-c3bf51d845b8-var-run-ovn\") pod \"ovn-controller-2sh28-config-xm4j8\" (UID: \"a43f00b0-1103-42d2-ae89-c3bf51d845b8\") " pod="openstack/ovn-controller-2sh28-config-xm4j8" Dec 13 06:48:39 crc kubenswrapper[5048]: I1213 06:48:39.236814 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/a43f00b0-1103-42d2-ae89-c3bf51d845b8-additional-scripts\") pod \"ovn-controller-2sh28-config-xm4j8\" (UID: \"a43f00b0-1103-42d2-ae89-c3bf51d845b8\") " pod="openstack/ovn-controller-2sh28-config-xm4j8" Dec 13 06:48:39 crc kubenswrapper[5048]: I1213 06:48:39.236862 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/a43f00b0-1103-42d2-ae89-c3bf51d845b8-var-log-ovn\") pod \"ovn-controller-2sh28-config-xm4j8\" (UID: \"a43f00b0-1103-42d2-ae89-c3bf51d845b8\") " pod="openstack/ovn-controller-2sh28-config-xm4j8" Dec 13 06:48:39 crc kubenswrapper[5048]: I1213 06:48:39.337901 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a43f00b0-1103-42d2-ae89-c3bf51d845b8-scripts\") pod \"ovn-controller-2sh28-config-xm4j8\" (UID: \"a43f00b0-1103-42d2-ae89-c3bf51d845b8\") " pod="openstack/ovn-controller-2sh28-config-xm4j8" Dec 13 06:48:39 crc kubenswrapper[5048]: I1213 06:48:39.337969 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/a43f00b0-1103-42d2-ae89-c3bf51d845b8-var-run\") pod \"ovn-controller-2sh28-config-xm4j8\" (UID: \"a43f00b0-1103-42d2-ae89-c3bf51d845b8\") " pod="openstack/ovn-controller-2sh28-config-xm4j8" Dec 13 06:48:39 crc kubenswrapper[5048]: I1213 06:48:39.338031 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bff8j\" (UniqueName: \"kubernetes.io/projected/a43f00b0-1103-42d2-ae89-c3bf51d845b8-kube-api-access-bff8j\") pod \"ovn-controller-2sh28-config-xm4j8\" (UID: \"a43f00b0-1103-42d2-ae89-c3bf51d845b8\") " pod="openstack/ovn-controller-2sh28-config-xm4j8" Dec 13 06:48:39 crc kubenswrapper[5048]: I1213 06:48:39.338068 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/a43f00b0-1103-42d2-ae89-c3bf51d845b8-var-run-ovn\") pod \"ovn-controller-2sh28-config-xm4j8\" (UID: \"a43f00b0-1103-42d2-ae89-c3bf51d845b8\") " pod="openstack/ovn-controller-2sh28-config-xm4j8" Dec 13 06:48:39 crc kubenswrapper[5048]: I1213 06:48:39.338145 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/a43f00b0-1103-42d2-ae89-c3bf51d845b8-additional-scripts\") pod \"ovn-controller-2sh28-config-xm4j8\" (UID: \"a43f00b0-1103-42d2-ae89-c3bf51d845b8\") " pod="openstack/ovn-controller-2sh28-config-xm4j8" Dec 13 06:48:39 crc kubenswrapper[5048]: I1213 06:48:39.338165 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/a43f00b0-1103-42d2-ae89-c3bf51d845b8-var-log-ovn\") pod \"ovn-controller-2sh28-config-xm4j8\" (UID: \"a43f00b0-1103-42d2-ae89-c3bf51d845b8\") " pod="openstack/ovn-controller-2sh28-config-xm4j8" Dec 13 06:48:39 crc 
kubenswrapper[5048]: I1213 06:48:39.338498 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/a43f00b0-1103-42d2-ae89-c3bf51d845b8-var-log-ovn\") pod \"ovn-controller-2sh28-config-xm4j8\" (UID: \"a43f00b0-1103-42d2-ae89-c3bf51d845b8\") " pod="openstack/ovn-controller-2sh28-config-xm4j8" Dec 13 06:48:39 crc kubenswrapper[5048]: I1213 06:48:39.338561 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/a43f00b0-1103-42d2-ae89-c3bf51d845b8-var-run-ovn\") pod \"ovn-controller-2sh28-config-xm4j8\" (UID: \"a43f00b0-1103-42d2-ae89-c3bf51d845b8\") " pod="openstack/ovn-controller-2sh28-config-xm4j8" Dec 13 06:48:39 crc kubenswrapper[5048]: I1213 06:48:39.338609 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/a43f00b0-1103-42d2-ae89-c3bf51d845b8-var-run\") pod \"ovn-controller-2sh28-config-xm4j8\" (UID: \"a43f00b0-1103-42d2-ae89-c3bf51d845b8\") " pod="openstack/ovn-controller-2sh28-config-xm4j8" Dec 13 06:48:39 crc kubenswrapper[5048]: I1213 06:48:39.339975 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/a43f00b0-1103-42d2-ae89-c3bf51d845b8-additional-scripts\") pod \"ovn-controller-2sh28-config-xm4j8\" (UID: \"a43f00b0-1103-42d2-ae89-c3bf51d845b8\") " pod="openstack/ovn-controller-2sh28-config-xm4j8" Dec 13 06:48:39 crc kubenswrapper[5048]: I1213 06:48:39.340636 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a43f00b0-1103-42d2-ae89-c3bf51d845b8-scripts\") pod \"ovn-controller-2sh28-config-xm4j8\" (UID: \"a43f00b0-1103-42d2-ae89-c3bf51d845b8\") " pod="openstack/ovn-controller-2sh28-config-xm4j8" Dec 13 06:48:39 crc kubenswrapper[5048]: I1213 06:48:39.361900 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bff8j\" (UniqueName: \"kubernetes.io/projected/a43f00b0-1103-42d2-ae89-c3bf51d845b8-kube-api-access-bff8j\") pod \"ovn-controller-2sh28-config-xm4j8\" (UID: \"a43f00b0-1103-42d2-ae89-c3bf51d845b8\") " pod="openstack/ovn-controller-2sh28-config-xm4j8" Dec 13 06:48:39 crc kubenswrapper[5048]: I1213 06:48:39.441539 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-2sh28-config-xm4j8" Dec 13 06:48:39 crc kubenswrapper[5048]: I1213 06:48:39.979846 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-2sh28-config-xm4j8"] Dec 13 06:48:39 crc kubenswrapper[5048]: W1213 06:48:39.988655 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda43f00b0_1103_42d2_ae89_c3bf51d845b8.slice/crio-b65c51142f7b1828a7e2bc881cb92d917a5b5763933c6a88564ccaead70371ff WatchSource:0}: Error finding container b65c51142f7b1828a7e2bc881cb92d917a5b5763933c6a88564ccaead70371ff: Status 404 returned error can't find the container with id b65c51142f7b1828a7e2bc881cb92d917a5b5763933c6a88564ccaead70371ff Dec 13 06:48:40 crc kubenswrapper[5048]: I1213 06:48:40.627688 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-2sh28-config-xm4j8" event={"ID":"a43f00b0-1103-42d2-ae89-c3bf51d845b8","Type":"ContainerStarted","Data":"b65c51142f7b1828a7e2bc881cb92d917a5b5763933c6a88564ccaead70371ff"} Dec 13 06:48:42 crc kubenswrapper[5048]: I1213 06:48:42.654052 5048 generic.go:334] "Generic (PLEG): container finished" podID="a43f00b0-1103-42d2-ae89-c3bf51d845b8" containerID="30ab5c4147986006ad8791488083740ccea5352177bdcb3ae2e820d24177a5a4" exitCode=0 Dec 13 06:48:42 crc kubenswrapper[5048]: I1213 06:48:42.654183 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-2sh28-config-xm4j8" event={"ID":"a43f00b0-1103-42d2-ae89-c3bf51d845b8","Type":"ContainerDied","Data":"30ab5c4147986006ad8791488083740ccea5352177bdcb3ae2e820d24177a5a4"} Dec 13 06:48:43 crc kubenswrapper[5048]: I1213 06:48:43.871271 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-2sh28" Dec 13 06:48:45 crc kubenswrapper[5048]: I1213 06:48:45.676376 5048 generic.go:334] "Generic (PLEG): container finished" podID="e3a0bce1-8848-4ac7-a030-19640b952708" containerID="75ee9b498965105594517eef849b5ed0c40d73c2d2599da5a3eae4643255329f" exitCode=0 Dec 13 06:48:45 crc kubenswrapper[5048]: I1213 06:48:45.676461 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"e3a0bce1-8848-4ac7-a030-19640b952708","Type":"ContainerDied","Data":"75ee9b498965105594517eef849b5ed0c40d73c2d2599da5a3eae4643255329f"} Dec 13 06:48:45 crc kubenswrapper[5048]: I1213 06:48:45.679607 5048 generic.go:334] "Generic (PLEG): container finished" podID="a7bbc535-10f7-44cc-89a6-cbb697149e4a" containerID="7a1fa96f3b72ad7f4e5857f6a5eb077b15e2b09603a5ab2399dffe44b8b8c7fd" exitCode=0 Dec 13 06:48:45 crc kubenswrapper[5048]: I1213 06:48:45.679634 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"a7bbc535-10f7-44cc-89a6-cbb697149e4a","Type":"ContainerDied","Data":"7a1fa96f3b72ad7f4e5857f6a5eb077b15e2b09603a5ab2399dffe44b8b8c7fd"} Dec 13 06:48:46 crc kubenswrapper[5048]: I1213 06:48:46.216213 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 13 06:48:46 crc kubenswrapper[5048]: I1213 06:48:46.216511 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" 
podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 13 06:48:48 crc kubenswrapper[5048]: I1213 06:48:48.903861 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4-etc-swift\") pod \"swift-storage-0\" (UID: \"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4\") " pod="openstack/swift-storage-0" Dec 13 06:48:48 crc kubenswrapper[5048]: I1213 06:48:48.911475 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4-etc-swift\") pod \"swift-storage-0\" (UID: \"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4\") " pod="openstack/swift-storage-0" Dec 13 06:48:49 crc kubenswrapper[5048]: I1213 06:48:49.052182 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Dec 13 06:48:50 crc kubenswrapper[5048]: E1213 06:48:50.771101 5048 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-glance-api:current-podified" Dec 13 06:48:50 crc kubenswrapper[5048]: E1213 06:48:50.771708 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:glance-db-sync,Image:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/glance/glance.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9gjbh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42415,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42415,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
glance-db-sync-hgfjd_openstack(91aa9cb1-7cd0-4cd3-9918-460b5d976ab7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 13 06:48:50 crc kubenswrapper[5048]: E1213 06:48:50.772932 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/glance-db-sync-hgfjd" podUID="91aa9cb1-7cd0-4cd3-9918-460b5d976ab7" Dec 13 06:48:50 crc kubenswrapper[5048]: I1213 06:48:50.914285 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-2sh28-config-xm4j8" Dec 13 06:48:51 crc kubenswrapper[5048]: I1213 06:48:51.045629 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/a43f00b0-1103-42d2-ae89-c3bf51d845b8-var-run\") pod \"a43f00b0-1103-42d2-ae89-c3bf51d845b8\" (UID: \"a43f00b0-1103-42d2-ae89-c3bf51d845b8\") " Dec 13 06:48:51 crc kubenswrapper[5048]: I1213 06:48:51.045964 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bff8j\" (UniqueName: \"kubernetes.io/projected/a43f00b0-1103-42d2-ae89-c3bf51d845b8-kube-api-access-bff8j\") pod \"a43f00b0-1103-42d2-ae89-c3bf51d845b8\" (UID: \"a43f00b0-1103-42d2-ae89-c3bf51d845b8\") " Dec 13 06:48:51 crc kubenswrapper[5048]: I1213 06:48:51.046079 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/a43f00b0-1103-42d2-ae89-c3bf51d845b8-var-run-ovn\") pod \"a43f00b0-1103-42d2-ae89-c3bf51d845b8\" (UID: \"a43f00b0-1103-42d2-ae89-c3bf51d845b8\") " Dec 13 06:48:51 crc kubenswrapper[5048]: I1213 06:48:51.045815 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a43f00b0-1103-42d2-ae89-c3bf51d845b8-var-run" (OuterVolumeSpecName: "var-run") pod "a43f00b0-1103-42d2-ae89-c3bf51d845b8" (UID: "a43f00b0-1103-42d2-ae89-c3bf51d845b8"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 13 06:48:51 crc kubenswrapper[5048]: I1213 06:48:51.046114 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/a43f00b0-1103-42d2-ae89-c3bf51d845b8-additional-scripts\") pod \"a43f00b0-1103-42d2-ae89-c3bf51d845b8\" (UID: \"a43f00b0-1103-42d2-ae89-c3bf51d845b8\") " Dec 13 06:48:51 crc kubenswrapper[5048]: I1213 06:48:51.046142 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a43f00b0-1103-42d2-ae89-c3bf51d845b8-scripts\") pod \"a43f00b0-1103-42d2-ae89-c3bf51d845b8\" (UID: \"a43f00b0-1103-42d2-ae89-c3bf51d845b8\") " Dec 13 06:48:51 crc kubenswrapper[5048]: I1213 06:48:51.046197 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a43f00b0-1103-42d2-ae89-c3bf51d845b8-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "a43f00b0-1103-42d2-ae89-c3bf51d845b8" (UID: "a43f00b0-1103-42d2-ae89-c3bf51d845b8"). InnerVolumeSpecName "var-run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 13 06:48:51 crc kubenswrapper[5048]: I1213 06:48:51.046239 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/a43f00b0-1103-42d2-ae89-c3bf51d845b8-var-log-ovn\") pod \"a43f00b0-1103-42d2-ae89-c3bf51d845b8\" (UID: \"a43f00b0-1103-42d2-ae89-c3bf51d845b8\") " Dec 13 06:48:51 crc kubenswrapper[5048]: I1213 06:48:51.046533 5048 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/a43f00b0-1103-42d2-ae89-c3bf51d845b8-var-run\") on node \"crc\" DevicePath \"\"" Dec 13 06:48:51 crc kubenswrapper[5048]: I1213 06:48:51.046544 5048 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/a43f00b0-1103-42d2-ae89-c3bf51d845b8-var-run-ovn\") on node \"crc\" DevicePath \"\"" Dec 13 06:48:51 crc kubenswrapper[5048]: I1213 06:48:51.046580 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a43f00b0-1103-42d2-ae89-c3bf51d845b8-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "a43f00b0-1103-42d2-ae89-c3bf51d845b8" (UID: "a43f00b0-1103-42d2-ae89-c3bf51d845b8"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 13 06:48:51 crc kubenswrapper[5048]: I1213 06:48:51.047666 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a43f00b0-1103-42d2-ae89-c3bf51d845b8-scripts" (OuterVolumeSpecName: "scripts") pod "a43f00b0-1103-42d2-ae89-c3bf51d845b8" (UID: "a43f00b0-1103-42d2-ae89-c3bf51d845b8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:48:51 crc kubenswrapper[5048]: I1213 06:48:51.049555 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a43f00b0-1103-42d2-ae89-c3bf51d845b8-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "a43f00b0-1103-42d2-ae89-c3bf51d845b8" (UID: "a43f00b0-1103-42d2-ae89-c3bf51d845b8"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:48:51 crc kubenswrapper[5048]: I1213 06:48:51.051985 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a43f00b0-1103-42d2-ae89-c3bf51d845b8-kube-api-access-bff8j" (OuterVolumeSpecName: "kube-api-access-bff8j") pod "a43f00b0-1103-42d2-ae89-c3bf51d845b8" (UID: "a43f00b0-1103-42d2-ae89-c3bf51d845b8"). InnerVolumeSpecName "kube-api-access-bff8j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:48:51 crc kubenswrapper[5048]: I1213 06:48:51.148335 5048 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/a43f00b0-1103-42d2-ae89-c3bf51d845b8-var-log-ovn\") on node \"crc\" DevicePath \"\"" Dec 13 06:48:51 crc kubenswrapper[5048]: I1213 06:48:51.148371 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bff8j\" (UniqueName: \"kubernetes.io/projected/a43f00b0-1103-42d2-ae89-c3bf51d845b8-kube-api-access-bff8j\") on node \"crc\" DevicePath \"\"" Dec 13 06:48:51 crc kubenswrapper[5048]: I1213 06:48:51.148400 5048 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/a43f00b0-1103-42d2-ae89-c3bf51d845b8-additional-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:48:51 crc kubenswrapper[5048]: I1213 06:48:51.148413 5048 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a43f00b0-1103-42d2-ae89-c3bf51d845b8-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:48:51 crc kubenswrapper[5048]: I1213 06:48:51.281029 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Dec 13 06:48:51 crc kubenswrapper[5048]: W1213 06:48:51.295926 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb7f3766b_7c8f_43ed_bf8e_4cd4f6cbe2a4.slice/crio-4165224ef5e8132414a46aeaba2e84e05f91c6ef8d954f42f2b95b6e2fbee75b WatchSource:0}: Error finding container 4165224ef5e8132414a46aeaba2e84e05f91c6ef8d954f42f2b95b6e2fbee75b: Status 404 returned error can't find the container with id 4165224ef5e8132414a46aeaba2e84e05f91c6ef8d954f42f2b95b6e2fbee75b Dec 13 06:48:51 crc kubenswrapper[5048]: I1213 06:48:51.726259 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4","Type":"ContainerStarted","Data":"4165224ef5e8132414a46aeaba2e84e05f91c6ef8d954f42f2b95b6e2fbee75b"} Dec 13 06:48:51 crc kubenswrapper[5048]: I1213 06:48:51.728181 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"e3a0bce1-8848-4ac7-a030-19640b952708","Type":"ContainerStarted","Data":"6037c65fef82f78efa1cd645f8f6f532d5d7048d1b16ac91f759541da6dbceee"} Dec 13 06:48:51 crc kubenswrapper[5048]: I1213 06:48:51.728454 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:48:51 crc kubenswrapper[5048]: I1213 06:48:51.729856 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"a7bbc535-10f7-44cc-89a6-cbb697149e4a","Type":"ContainerStarted","Data":"76983382cf92288123a8dd9336351247957426def1673de16e24790c2e0b5eaa"} Dec 13 06:48:51 crc kubenswrapper[5048]: I1213 06:48:51.730270 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Dec 13 06:48:51 crc kubenswrapper[5048]: I1213 06:48:51.733352 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-2sh28-config-xm4j8" Dec 13 06:48:51 crc kubenswrapper[5048]: E1213 06:48:51.733795 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-glance-api:current-podified\\\"\"" pod="openstack/glance-db-sync-hgfjd" podUID="91aa9cb1-7cd0-4cd3-9918-460b5d976ab7" Dec 13 06:48:51 crc kubenswrapper[5048]: I1213 06:48:51.733983 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-2sh28-config-xm4j8" event={"ID":"a43f00b0-1103-42d2-ae89-c3bf51d845b8","Type":"ContainerDied","Data":"b65c51142f7b1828a7e2bc881cb92d917a5b5763933c6a88564ccaead70371ff"} Dec 13 06:48:51 crc kubenswrapper[5048]: I1213 06:48:51.734008 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b65c51142f7b1828a7e2bc881cb92d917a5b5763933c6a88564ccaead70371ff" Dec 13 06:48:51 crc kubenswrapper[5048]: I1213 06:48:51.752697 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=45.555256098 podStartE2EDuration="1m22.752676274s" podCreationTimestamp="2025-12-13 06:47:29 +0000 UTC" firstStartedPulling="2025-12-13 06:47:35.049268492 +0000 UTC m=+1088.915863073" lastFinishedPulling="2025-12-13 06:48:12.246688668 +0000 UTC m=+1126.113283249" observedRunningTime="2025-12-13 06:48:51.751921743 +0000 UTC m=+1165.618516344" watchObservedRunningTime="2025-12-13 06:48:51.752676274 +0000 UTC m=+1165.619270865" Dec 13 06:48:51 crc kubenswrapper[5048]: I1213 06:48:51.782543 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=46.638842211 podStartE2EDuration="1m23.782521947s" podCreationTimestamp="2025-12-13 06:47:28 +0000 UTC" firstStartedPulling="2025-12-13 06:47:35.050590639 +0000 UTC m=+1088.917185220" lastFinishedPulling="2025-12-13 06:48:12.194270375 +0000 UTC m=+1126.060864956" observedRunningTime="2025-12-13 06:48:51.775574098 +0000 UTC m=+1165.642168689" watchObservedRunningTime="2025-12-13 06:48:51.782521947 +0000 UTC m=+1165.649116538" Dec 13 06:48:51 crc kubenswrapper[5048]: E1213 06:48:51.870313 5048 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda43f00b0_1103_42d2_ae89_c3bf51d845b8.slice\": RecentStats: unable to find data in memory cache]" Dec 13 06:48:52 crc kubenswrapper[5048]: I1213 06:48:52.028075 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-2sh28-config-xm4j8"] Dec 13 06:48:52 crc kubenswrapper[5048]: I1213 06:48:52.035542 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-2sh28-config-xm4j8"] Dec 13 06:48:52 crc kubenswrapper[5048]: I1213 06:48:52.576514 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a43f00b0-1103-42d2-ae89-c3bf51d845b8" path="/var/lib/kubelet/pods/a43f00b0-1103-42d2-ae89-c3bf51d845b8/volumes" Dec 13 06:48:53 crc kubenswrapper[5048]: I1213 06:48:53.753242 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4","Type":"ContainerStarted","Data":"3050d3082b99b8ece77eb32ce5b0cf5b3b1610c19551ead5f4c1b00c4a7880db"} Dec 13 06:48:53 crc kubenswrapper[5048]: I1213 06:48:53.753574 5048 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4","Type":"ContainerStarted","Data":"9c32443059bc2fc896e6864db7a632027a598a67965f0e1a51969254046cb7c2"} Dec 13 06:48:54 crc kubenswrapper[5048]: I1213 06:48:54.766897 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4","Type":"ContainerStarted","Data":"5d1bc6dd50fdb2f9e419025bc005a3cc9ea68caec848cb05010b64410a7a6ab1"} Dec 13 06:48:54 crc kubenswrapper[5048]: I1213 06:48:54.766973 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4","Type":"ContainerStarted","Data":"324519342e971a42c70d32c3c6c6a7babbc40986387d29281cf6d894fa58fc1f"} Dec 13 06:48:55 crc kubenswrapper[5048]: I1213 06:48:55.780193 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4","Type":"ContainerStarted","Data":"e582d6ec25981fd68ff76196411ee87034ec72c2b8f717e91e398a9e8dbb4dcc"} Dec 13 06:48:55 crc kubenswrapper[5048]: I1213 06:48:55.780612 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4","Type":"ContainerStarted","Data":"534c835346f0c38827de17d6f7cc9597bb59a397f2634b6a220231b573aab815"} Dec 13 06:48:55 crc kubenswrapper[5048]: I1213 06:48:55.780629 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4","Type":"ContainerStarted","Data":"54a506ffee6292fe826c9c891d99079106e9a505fed4408ee90d256a55ef52ae"} Dec 13 06:48:56 crc kubenswrapper[5048]: I1213 06:48:56.791578 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4","Type":"ContainerStarted","Data":"a6590381dd73e6c7d8c4aa3b06c8d72834f1bf31fca27523afb09ff71b205450"} Dec 13 06:48:58 crc kubenswrapper[5048]: I1213 06:48:58.816848 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4","Type":"ContainerStarted","Data":"6b09c6c81dc66a5648bf79e2034e05ca1033b9b25f95903cd2fb7ef674918c84"} Dec 13 06:48:58 crc kubenswrapper[5048]: I1213 06:48:58.817687 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4","Type":"ContainerStarted","Data":"bb75ca2a20a1d7db4981474686a8fe22ea42fff6b92aa78e9460dcd1c3301cc6"} Dec 13 06:48:58 crc kubenswrapper[5048]: I1213 06:48:58.817699 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4","Type":"ContainerStarted","Data":"73891798dc2e0c0b4e8821787d37e52e4e828ade5d7f1036a8d29b6688fb32e3"} Dec 13 06:48:58 crc kubenswrapper[5048]: I1213 06:48:58.817708 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4","Type":"ContainerStarted","Data":"71ad40b7d218ba8587bbf349d51a6df370775518eb70ba28d0619fd95bfe0636"} Dec 13 06:48:58 crc kubenswrapper[5048]: I1213 06:48:58.817718 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4","Type":"ContainerStarted","Data":"2648669f22c4be8063d2b25a00b90057152b260dbe7e08d51c90dcb1dbd3ca9a"} Dec 13 06:48:58 crc kubenswrapper[5048]: I1213 06:48:58.817726 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4","Type":"ContainerStarted","Data":"5ec46f17bb502b9e3dcf2fc5862a3e79f0a564479a1db6dd09f40c331a749255"} Dec 13 06:48:59 crc kubenswrapper[5048]: I1213 06:48:59.831506 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4","Type":"ContainerStarted","Data":"099942533cb1540ebb8733f2ef8b38a6c00ff4b2d0151f7f4e877ed032c9dfa1"} Dec 13 06:48:59 crc kubenswrapper[5048]: I1213 06:48:59.879362 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=38.542611589 podStartE2EDuration="44.879343862s" podCreationTimestamp="2025-12-13 06:48:15 +0000 UTC" firstStartedPulling="2025-12-13 06:48:51.298469677 +0000 UTC m=+1165.165064258" lastFinishedPulling="2025-12-13 06:48:57.63520195 +0000 UTC m=+1171.501796531" observedRunningTime="2025-12-13 06:48:59.865664059 +0000 UTC m=+1173.732258660" watchObservedRunningTime="2025-12-13 06:48:59.879343862 +0000 UTC m=+1173.745938443" Dec 13 06:49:00 crc kubenswrapper[5048]: I1213 06:49:00.215624 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-2kjcf"] Dec 13 06:49:00 crc kubenswrapper[5048]: E1213 06:49:00.215938 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a43f00b0-1103-42d2-ae89-c3bf51d845b8" containerName="ovn-config" Dec 13 06:49:00 crc kubenswrapper[5048]: I1213 06:49:00.215963 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="a43f00b0-1103-42d2-ae89-c3bf51d845b8" containerName="ovn-config" Dec 13 06:49:00 crc kubenswrapper[5048]: I1213 06:49:00.216131 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="a43f00b0-1103-42d2-ae89-c3bf51d845b8" containerName="ovn-config" Dec 13 06:49:00 crc kubenswrapper[5048]: I1213 06:49:00.216906 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-2kjcf" Dec 13 06:49:00 crc kubenswrapper[5048]: I1213 06:49:00.219030 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Dec 13 06:49:00 crc kubenswrapper[5048]: I1213 06:49:00.234580 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-2kjcf"] Dec 13 06:49:00 crc kubenswrapper[5048]: I1213 06:49:00.293200 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b53097b1-3126-4e17-b910-d2b7f57ec87e-ovsdbserver-sb\") pod \"dnsmasq-dns-5c79d794d7-2kjcf\" (UID: \"b53097b1-3126-4e17-b910-d2b7f57ec87e\") " pod="openstack/dnsmasq-dns-5c79d794d7-2kjcf" Dec 13 06:49:00 crc kubenswrapper[5048]: I1213 06:49:00.293282 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v4mfw\" (UniqueName: \"kubernetes.io/projected/b53097b1-3126-4e17-b910-d2b7f57ec87e-kube-api-access-v4mfw\") pod \"dnsmasq-dns-5c79d794d7-2kjcf\" (UID: \"b53097b1-3126-4e17-b910-d2b7f57ec87e\") " pod="openstack/dnsmasq-dns-5c79d794d7-2kjcf" Dec 13 06:49:00 crc kubenswrapper[5048]: I1213 06:49:00.293323 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b53097b1-3126-4e17-b910-d2b7f57ec87e-dns-svc\") pod \"dnsmasq-dns-5c79d794d7-2kjcf\" (UID: \"b53097b1-3126-4e17-b910-d2b7f57ec87e\") " pod="openstack/dnsmasq-dns-5c79d794d7-2kjcf" Dec 13 06:49:00 crc kubenswrapper[5048]: I1213 06:49:00.293339 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b53097b1-3126-4e17-b910-d2b7f57ec87e-ovsdbserver-nb\") pod \"dnsmasq-dns-5c79d794d7-2kjcf\" (UID: \"b53097b1-3126-4e17-b910-d2b7f57ec87e\") " pod="openstack/dnsmasq-dns-5c79d794d7-2kjcf" Dec 13 06:49:00 crc kubenswrapper[5048]: I1213 06:49:00.293403 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b53097b1-3126-4e17-b910-d2b7f57ec87e-config\") pod \"dnsmasq-dns-5c79d794d7-2kjcf\" (UID: \"b53097b1-3126-4e17-b910-d2b7f57ec87e\") " pod="openstack/dnsmasq-dns-5c79d794d7-2kjcf" Dec 13 06:49:00 crc kubenswrapper[5048]: I1213 06:49:00.293535 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b53097b1-3126-4e17-b910-d2b7f57ec87e-dns-swift-storage-0\") pod \"dnsmasq-dns-5c79d794d7-2kjcf\" (UID: \"b53097b1-3126-4e17-b910-d2b7f57ec87e\") " pod="openstack/dnsmasq-dns-5c79d794d7-2kjcf" Dec 13 06:49:00 crc kubenswrapper[5048]: I1213 06:49:00.395383 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b53097b1-3126-4e17-b910-d2b7f57ec87e-ovsdbserver-sb\") pod \"dnsmasq-dns-5c79d794d7-2kjcf\" (UID: \"b53097b1-3126-4e17-b910-d2b7f57ec87e\") " pod="openstack/dnsmasq-dns-5c79d794d7-2kjcf" Dec 13 06:49:00 crc kubenswrapper[5048]: I1213 06:49:00.395519 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v4mfw\" (UniqueName: \"kubernetes.io/projected/b53097b1-3126-4e17-b910-d2b7f57ec87e-kube-api-access-v4mfw\") pod \"dnsmasq-dns-5c79d794d7-2kjcf\" (UID: 
\"b53097b1-3126-4e17-b910-d2b7f57ec87e\") " pod="openstack/dnsmasq-dns-5c79d794d7-2kjcf" Dec 13 06:49:00 crc kubenswrapper[5048]: I1213 06:49:00.395571 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b53097b1-3126-4e17-b910-d2b7f57ec87e-dns-svc\") pod \"dnsmasq-dns-5c79d794d7-2kjcf\" (UID: \"b53097b1-3126-4e17-b910-d2b7f57ec87e\") " pod="openstack/dnsmasq-dns-5c79d794d7-2kjcf" Dec 13 06:49:00 crc kubenswrapper[5048]: I1213 06:49:00.395592 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b53097b1-3126-4e17-b910-d2b7f57ec87e-ovsdbserver-nb\") pod \"dnsmasq-dns-5c79d794d7-2kjcf\" (UID: \"b53097b1-3126-4e17-b910-d2b7f57ec87e\") " pod="openstack/dnsmasq-dns-5c79d794d7-2kjcf" Dec 13 06:49:00 crc kubenswrapper[5048]: I1213 06:49:00.395643 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b53097b1-3126-4e17-b910-d2b7f57ec87e-config\") pod \"dnsmasq-dns-5c79d794d7-2kjcf\" (UID: \"b53097b1-3126-4e17-b910-d2b7f57ec87e\") " pod="openstack/dnsmasq-dns-5c79d794d7-2kjcf" Dec 13 06:49:00 crc kubenswrapper[5048]: I1213 06:49:00.395698 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b53097b1-3126-4e17-b910-d2b7f57ec87e-dns-swift-storage-0\") pod \"dnsmasq-dns-5c79d794d7-2kjcf\" (UID: \"b53097b1-3126-4e17-b910-d2b7f57ec87e\") " pod="openstack/dnsmasq-dns-5c79d794d7-2kjcf" Dec 13 06:49:00 crc kubenswrapper[5048]: I1213 06:49:00.396777 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b53097b1-3126-4e17-b910-d2b7f57ec87e-ovsdbserver-sb\") pod \"dnsmasq-dns-5c79d794d7-2kjcf\" (UID: \"b53097b1-3126-4e17-b910-d2b7f57ec87e\") " pod="openstack/dnsmasq-dns-5c79d794d7-2kjcf" Dec 13 06:49:00 crc kubenswrapper[5048]: I1213 06:49:00.396882 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b53097b1-3126-4e17-b910-d2b7f57ec87e-ovsdbserver-nb\") pod \"dnsmasq-dns-5c79d794d7-2kjcf\" (UID: \"b53097b1-3126-4e17-b910-d2b7f57ec87e\") " pod="openstack/dnsmasq-dns-5c79d794d7-2kjcf" Dec 13 06:49:00 crc kubenswrapper[5048]: I1213 06:49:00.396993 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b53097b1-3126-4e17-b910-d2b7f57ec87e-dns-svc\") pod \"dnsmasq-dns-5c79d794d7-2kjcf\" (UID: \"b53097b1-3126-4e17-b910-d2b7f57ec87e\") " pod="openstack/dnsmasq-dns-5c79d794d7-2kjcf" Dec 13 06:49:00 crc kubenswrapper[5048]: I1213 06:49:00.397162 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b53097b1-3126-4e17-b910-d2b7f57ec87e-config\") pod \"dnsmasq-dns-5c79d794d7-2kjcf\" (UID: \"b53097b1-3126-4e17-b910-d2b7f57ec87e\") " pod="openstack/dnsmasq-dns-5c79d794d7-2kjcf" Dec 13 06:49:00 crc kubenswrapper[5048]: I1213 06:49:00.397174 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b53097b1-3126-4e17-b910-d2b7f57ec87e-dns-swift-storage-0\") pod \"dnsmasq-dns-5c79d794d7-2kjcf\" (UID: \"b53097b1-3126-4e17-b910-d2b7f57ec87e\") " pod="openstack/dnsmasq-dns-5c79d794d7-2kjcf" Dec 13 06:49:00 crc kubenswrapper[5048]: 
I1213 06:49:00.417184 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v4mfw\" (UniqueName: \"kubernetes.io/projected/b53097b1-3126-4e17-b910-d2b7f57ec87e-kube-api-access-v4mfw\") pod \"dnsmasq-dns-5c79d794d7-2kjcf\" (UID: \"b53097b1-3126-4e17-b910-d2b7f57ec87e\") " pod="openstack/dnsmasq-dns-5c79d794d7-2kjcf" Dec 13 06:49:00 crc kubenswrapper[5048]: I1213 06:49:00.536251 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-2kjcf" Dec 13 06:49:00 crc kubenswrapper[5048]: I1213 06:49:00.546713 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Dec 13 06:49:00 crc kubenswrapper[5048]: I1213 06:49:00.615610 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:49:00 crc kubenswrapper[5048]: I1213 06:49:00.945908 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-c6qr4"] Dec 13 06:49:00 crc kubenswrapper[5048]: I1213 06:49:00.947456 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-c6qr4" Dec 13 06:49:00 crc kubenswrapper[5048]: I1213 06:49:00.962850 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-c6qr4"] Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.019965 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ef4c1d97-0727-448e-910c-762a2f36ca80-operator-scripts\") pod \"cinder-db-create-c6qr4\" (UID: \"ef4c1d97-0727-448e-910c-762a2f36ca80\") " pod="openstack/cinder-db-create-c6qr4" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.020075 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9cks5\" (UniqueName: \"kubernetes.io/projected/ef4c1d97-0727-448e-910c-762a2f36ca80-kube-api-access-9cks5\") pod \"cinder-db-create-c6qr4\" (UID: \"ef4c1d97-0727-448e-910c-762a2f36ca80\") " pod="openstack/cinder-db-create-c6qr4" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.071411 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-zwlkk"] Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.072335 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-zwlkk" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.084023 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-4fa6-account-create-update-wv7m6"] Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.086119 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-4fa6-account-create-update-wv7m6" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.091100 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.122376 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gtcc2\" (UniqueName: \"kubernetes.io/projected/56bc6cf2-a689-4bbe-a20e-beda8ebe0165-kube-api-access-gtcc2\") pod \"barbican-4fa6-account-create-update-wv7m6\" (UID: \"56bc6cf2-a689-4bbe-a20e-beda8ebe0165\") " pod="openstack/barbican-4fa6-account-create-update-wv7m6" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.122808 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/56bc6cf2-a689-4bbe-a20e-beda8ebe0165-operator-scripts\") pod \"barbican-4fa6-account-create-update-wv7m6\" (UID: \"56bc6cf2-a689-4bbe-a20e-beda8ebe0165\") " pod="openstack/barbican-4fa6-account-create-update-wv7m6" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.122835 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9wr4b\" (UniqueName: \"kubernetes.io/projected/37e65177-fd1b-4fee-aad0-e089b9e9c47b-kube-api-access-9wr4b\") pod \"barbican-db-create-zwlkk\" (UID: \"37e65177-fd1b-4fee-aad0-e089b9e9c47b\") " pod="openstack/barbican-db-create-zwlkk" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.122878 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9cks5\" (UniqueName: \"kubernetes.io/projected/ef4c1d97-0727-448e-910c-762a2f36ca80-kube-api-access-9cks5\") pod \"cinder-db-create-c6qr4\" (UID: \"ef4c1d97-0727-448e-910c-762a2f36ca80\") " pod="openstack/cinder-db-create-c6qr4" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.122953 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37e65177-fd1b-4fee-aad0-e089b9e9c47b-operator-scripts\") pod \"barbican-db-create-zwlkk\" (UID: \"37e65177-fd1b-4fee-aad0-e089b9e9c47b\") " pod="openstack/barbican-db-create-zwlkk" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.123018 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ef4c1d97-0727-448e-910c-762a2f36ca80-operator-scripts\") pod \"cinder-db-create-c6qr4\" (UID: \"ef4c1d97-0727-448e-910c-762a2f36ca80\") " pod="openstack/cinder-db-create-c6qr4" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.123816 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ef4c1d97-0727-448e-910c-762a2f36ca80-operator-scripts\") pod \"cinder-db-create-c6qr4\" (UID: \"ef4c1d97-0727-448e-910c-762a2f36ca80\") " pod="openstack/cinder-db-create-c6qr4" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.181509 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-zwlkk"] Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.195410 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9cks5\" (UniqueName: \"kubernetes.io/projected/ef4c1d97-0727-448e-910c-762a2f36ca80-kube-api-access-9cks5\") pod \"cinder-db-create-c6qr4\" (UID: 
\"ef4c1d97-0727-448e-910c-762a2f36ca80\") " pod="openstack/cinder-db-create-c6qr4" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.205364 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-4fa6-account-create-update-wv7m6"] Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.228092 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gtcc2\" (UniqueName: \"kubernetes.io/projected/56bc6cf2-a689-4bbe-a20e-beda8ebe0165-kube-api-access-gtcc2\") pod \"barbican-4fa6-account-create-update-wv7m6\" (UID: \"56bc6cf2-a689-4bbe-a20e-beda8ebe0165\") " pod="openstack/barbican-4fa6-account-create-update-wv7m6" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.228190 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/56bc6cf2-a689-4bbe-a20e-beda8ebe0165-operator-scripts\") pod \"barbican-4fa6-account-create-update-wv7m6\" (UID: \"56bc6cf2-a689-4bbe-a20e-beda8ebe0165\") " pod="openstack/barbican-4fa6-account-create-update-wv7m6" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.228215 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9wr4b\" (UniqueName: \"kubernetes.io/projected/37e65177-fd1b-4fee-aad0-e089b9e9c47b-kube-api-access-9wr4b\") pod \"barbican-db-create-zwlkk\" (UID: \"37e65177-fd1b-4fee-aad0-e089b9e9c47b\") " pod="openstack/barbican-db-create-zwlkk" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.228382 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37e65177-fd1b-4fee-aad0-e089b9e9c47b-operator-scripts\") pod \"barbican-db-create-zwlkk\" (UID: \"37e65177-fd1b-4fee-aad0-e089b9e9c47b\") " pod="openstack/barbican-db-create-zwlkk" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.231231 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37e65177-fd1b-4fee-aad0-e089b9e9c47b-operator-scripts\") pod \"barbican-db-create-zwlkk\" (UID: \"37e65177-fd1b-4fee-aad0-e089b9e9c47b\") " pod="openstack/barbican-db-create-zwlkk" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.233278 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/56bc6cf2-a689-4bbe-a20e-beda8ebe0165-operator-scripts\") pod \"barbican-4fa6-account-create-update-wv7m6\" (UID: \"56bc6cf2-a689-4bbe-a20e-beda8ebe0165\") " pod="openstack/barbican-4fa6-account-create-update-wv7m6" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.252109 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-2kjcf"] Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.258154 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9wr4b\" (UniqueName: \"kubernetes.io/projected/37e65177-fd1b-4fee-aad0-e089b9e9c47b-kube-api-access-9wr4b\") pod \"barbican-db-create-zwlkk\" (UID: \"37e65177-fd1b-4fee-aad0-e089b9e9c47b\") " pod="openstack/barbican-db-create-zwlkk" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.259068 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-5aa0-account-create-update-24bj7"] Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.259191 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-gtcc2\" (UniqueName: \"kubernetes.io/projected/56bc6cf2-a689-4bbe-a20e-beda8ebe0165-kube-api-access-gtcc2\") pod \"barbican-4fa6-account-create-update-wv7m6\" (UID: \"56bc6cf2-a689-4bbe-a20e-beda8ebe0165\") " pod="openstack/barbican-4fa6-account-create-update-wv7m6" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.260093 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-5aa0-account-create-update-24bj7" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.268655 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-5aa0-account-create-update-24bj7"] Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.270410 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.277021 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-hscms"] Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.278012 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-hscms" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.278538 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-c6qr4" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.283008 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.283065 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-hscms"] Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.283373 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.301206 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.301509 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-scw59" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.329839 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fhr4g\" (UniqueName: \"kubernetes.io/projected/3f13f74e-d8ac-4cd4-a6f6-4797086d1f82-kube-api-access-fhr4g\") pod \"keystone-db-sync-hscms\" (UID: \"3f13f74e-d8ac-4cd4-a6f6-4797086d1f82\") " pod="openstack/keystone-db-sync-hscms" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.329913 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f13f74e-d8ac-4cd4-a6f6-4797086d1f82-config-data\") pod \"keystone-db-sync-hscms\" (UID: \"3f13f74e-d8ac-4cd4-a6f6-4797086d1f82\") " pod="openstack/keystone-db-sync-hscms" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.329948 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f13f74e-d8ac-4cd4-a6f6-4797086d1f82-combined-ca-bundle\") pod \"keystone-db-sync-hscms\" (UID: \"3f13f74e-d8ac-4cd4-a6f6-4797086d1f82\") " pod="openstack/keystone-db-sync-hscms" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.329982 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/84147fa9-3a4b-467d-a9db-d8c4c3ff66e8-operator-scripts\") pod \"cinder-5aa0-account-create-update-24bj7\" (UID: \"84147fa9-3a4b-467d-a9db-d8c4c3ff66e8\") " pod="openstack/cinder-5aa0-account-create-update-24bj7" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.330046 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-klfr7\" (UniqueName: \"kubernetes.io/projected/84147fa9-3a4b-467d-a9db-d8c4c3ff66e8-kube-api-access-klfr7\") pod \"cinder-5aa0-account-create-update-24bj7\" (UID: \"84147fa9-3a4b-467d-a9db-d8c4c3ff66e8\") " pod="openstack/cinder-5aa0-account-create-update-24bj7" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.357320 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-b786-account-create-update-s8j9n"] Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.358702 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-b786-account-create-update-s8j9n" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.360316 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.365964 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-wmwmb"] Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.373955 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-wmwmb" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.375483 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-b786-account-create-update-s8j9n"] Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.382534 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-wmwmb"] Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.432206 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f13f74e-d8ac-4cd4-a6f6-4797086d1f82-config-data\") pod \"keystone-db-sync-hscms\" (UID: \"3f13f74e-d8ac-4cd4-a6f6-4797086d1f82\") " pod="openstack/keystone-db-sync-hscms" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.432288 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jlt7s\" (UniqueName: \"kubernetes.io/projected/a786107a-8b19-4bbd-9049-1d4590029976-kube-api-access-jlt7s\") pod \"neutron-b786-account-create-update-s8j9n\" (UID: \"a786107a-8b19-4bbd-9049-1d4590029976\") " pod="openstack/neutron-b786-account-create-update-s8j9n" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.432393 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f13f74e-d8ac-4cd4-a6f6-4797086d1f82-combined-ca-bundle\") pod \"keystone-db-sync-hscms\" (UID: \"3f13f74e-d8ac-4cd4-a6f6-4797086d1f82\") " pod="openstack/keystone-db-sync-hscms" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.432445 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/84147fa9-3a4b-467d-a9db-d8c4c3ff66e8-operator-scripts\") pod \"cinder-5aa0-account-create-update-24bj7\" (UID: \"84147fa9-3a4b-467d-a9db-d8c4c3ff66e8\") " pod="openstack/cinder-5aa0-account-create-update-24bj7" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 
06:49:01.432772 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-klfr7\" (UniqueName: \"kubernetes.io/projected/84147fa9-3a4b-467d-a9db-d8c4c3ff66e8-kube-api-access-klfr7\") pod \"cinder-5aa0-account-create-update-24bj7\" (UID: \"84147fa9-3a4b-467d-a9db-d8c4c3ff66e8\") " pod="openstack/cinder-5aa0-account-create-update-24bj7" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.432809 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jdhcz\" (UniqueName: \"kubernetes.io/projected/0f2d2a25-1fa1-4d52-9ecb-5efe9871ea94-kube-api-access-jdhcz\") pod \"neutron-db-create-wmwmb\" (UID: \"0f2d2a25-1fa1-4d52-9ecb-5efe9871ea94\") " pod="openstack/neutron-db-create-wmwmb" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.432828 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a786107a-8b19-4bbd-9049-1d4590029976-operator-scripts\") pod \"neutron-b786-account-create-update-s8j9n\" (UID: \"a786107a-8b19-4bbd-9049-1d4590029976\") " pod="openstack/neutron-b786-account-create-update-s8j9n" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.432843 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0f2d2a25-1fa1-4d52-9ecb-5efe9871ea94-operator-scripts\") pod \"neutron-db-create-wmwmb\" (UID: \"0f2d2a25-1fa1-4d52-9ecb-5efe9871ea94\") " pod="openstack/neutron-db-create-wmwmb" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.432862 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fhr4g\" (UniqueName: \"kubernetes.io/projected/3f13f74e-d8ac-4cd4-a6f6-4797086d1f82-kube-api-access-fhr4g\") pod \"keystone-db-sync-hscms\" (UID: \"3f13f74e-d8ac-4cd4-a6f6-4797086d1f82\") " pod="openstack/keystone-db-sync-hscms" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.433741 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/84147fa9-3a4b-467d-a9db-d8c4c3ff66e8-operator-scripts\") pod \"cinder-5aa0-account-create-update-24bj7\" (UID: \"84147fa9-3a4b-467d-a9db-d8c4c3ff66e8\") " pod="openstack/cinder-5aa0-account-create-update-24bj7" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.438382 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f13f74e-d8ac-4cd4-a6f6-4797086d1f82-config-data\") pod \"keystone-db-sync-hscms\" (UID: \"3f13f74e-d8ac-4cd4-a6f6-4797086d1f82\") " pod="openstack/keystone-db-sync-hscms" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.438668 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-zwlkk" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.438891 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f13f74e-d8ac-4cd4-a6f6-4797086d1f82-combined-ca-bundle\") pod \"keystone-db-sync-hscms\" (UID: \"3f13f74e-d8ac-4cd4-a6f6-4797086d1f82\") " pod="openstack/keystone-db-sync-hscms" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.452458 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-klfr7\" (UniqueName: \"kubernetes.io/projected/84147fa9-3a4b-467d-a9db-d8c4c3ff66e8-kube-api-access-klfr7\") pod \"cinder-5aa0-account-create-update-24bj7\" (UID: \"84147fa9-3a4b-467d-a9db-d8c4c3ff66e8\") " pod="openstack/cinder-5aa0-account-create-update-24bj7" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.456517 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fhr4g\" (UniqueName: \"kubernetes.io/projected/3f13f74e-d8ac-4cd4-a6f6-4797086d1f82-kube-api-access-fhr4g\") pod \"keystone-db-sync-hscms\" (UID: \"3f13f74e-d8ac-4cd4-a6f6-4797086d1f82\") " pod="openstack/keystone-db-sync-hscms" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.495316 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-4fa6-account-create-update-wv7m6" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.535083 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jlt7s\" (UniqueName: \"kubernetes.io/projected/a786107a-8b19-4bbd-9049-1d4590029976-kube-api-access-jlt7s\") pod \"neutron-b786-account-create-update-s8j9n\" (UID: \"a786107a-8b19-4bbd-9049-1d4590029976\") " pod="openstack/neutron-b786-account-create-update-s8j9n" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.535308 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jdhcz\" (UniqueName: \"kubernetes.io/projected/0f2d2a25-1fa1-4d52-9ecb-5efe9871ea94-kube-api-access-jdhcz\") pod \"neutron-db-create-wmwmb\" (UID: \"0f2d2a25-1fa1-4d52-9ecb-5efe9871ea94\") " pod="openstack/neutron-db-create-wmwmb" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.535333 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a786107a-8b19-4bbd-9049-1d4590029976-operator-scripts\") pod \"neutron-b786-account-create-update-s8j9n\" (UID: \"a786107a-8b19-4bbd-9049-1d4590029976\") " pod="openstack/neutron-b786-account-create-update-s8j9n" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.535351 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0f2d2a25-1fa1-4d52-9ecb-5efe9871ea94-operator-scripts\") pod \"neutron-db-create-wmwmb\" (UID: \"0f2d2a25-1fa1-4d52-9ecb-5efe9871ea94\") " pod="openstack/neutron-db-create-wmwmb" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.536520 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a786107a-8b19-4bbd-9049-1d4590029976-operator-scripts\") pod \"neutron-b786-account-create-update-s8j9n\" (UID: \"a786107a-8b19-4bbd-9049-1d4590029976\") " pod="openstack/neutron-b786-account-create-update-s8j9n" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.537198 5048 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0f2d2a25-1fa1-4d52-9ecb-5efe9871ea94-operator-scripts\") pod \"neutron-db-create-wmwmb\" (UID: \"0f2d2a25-1fa1-4d52-9ecb-5efe9871ea94\") " pod="openstack/neutron-db-create-wmwmb" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.555908 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jlt7s\" (UniqueName: \"kubernetes.io/projected/a786107a-8b19-4bbd-9049-1d4590029976-kube-api-access-jlt7s\") pod \"neutron-b786-account-create-update-s8j9n\" (UID: \"a786107a-8b19-4bbd-9049-1d4590029976\") " pod="openstack/neutron-b786-account-create-update-s8j9n" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.557766 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jdhcz\" (UniqueName: \"kubernetes.io/projected/0f2d2a25-1fa1-4d52-9ecb-5efe9871ea94-kube-api-access-jdhcz\") pod \"neutron-db-create-wmwmb\" (UID: \"0f2d2a25-1fa1-4d52-9ecb-5efe9871ea94\") " pod="openstack/neutron-db-create-wmwmb" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.600342 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-5aa0-account-create-update-24bj7" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.606478 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-hscms" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.669098 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-c6qr4"] Dec 13 06:49:01 crc kubenswrapper[5048]: W1213 06:49:01.689857 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podef4c1d97_0727_448e_910c_762a2f36ca80.slice/crio-1e1c6e6bcc46e123538328ed93a7267fa5e2c3336d8794cd0798bda3fe0d587b WatchSource:0}: Error finding container 1e1c6e6bcc46e123538328ed93a7267fa5e2c3336d8794cd0798bda3fe0d587b: Status 404 returned error can't find the container with id 1e1c6e6bcc46e123538328ed93a7267fa5e2c3336d8794cd0798bda3fe0d587b Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.702556 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-wmwmb" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.702875 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-b786-account-create-update-s8j9n" Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.814408 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-zwlkk"] Dec 13 06:49:01 crc kubenswrapper[5048]: W1213 06:49:01.830710 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod37e65177_fd1b_4fee_aad0_e089b9e9c47b.slice/crio-336863bdbc03709a806fa2cf11974442dfc6e3c1fd6d53622cf4c0d96a560d9b WatchSource:0}: Error finding container 336863bdbc03709a806fa2cf11974442dfc6e3c1fd6d53622cf4c0d96a560d9b: Status 404 returned error can't find the container with id 336863bdbc03709a806fa2cf11974442dfc6e3c1fd6d53622cf4c0d96a560d9b Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.883008 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-c6qr4" event={"ID":"ef4c1d97-0727-448e-910c-762a2f36ca80","Type":"ContainerStarted","Data":"1e1c6e6bcc46e123538328ed93a7267fa5e2c3336d8794cd0798bda3fe0d587b"} Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.902940 5048 generic.go:334] "Generic (PLEG): container finished" podID="b53097b1-3126-4e17-b910-d2b7f57ec87e" containerID="1f0cc3781d25c5f8bd9c9357cecd9de13b6028fe940551bf836d1e3a290ec1e9" exitCode=0 Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.903036 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-2kjcf" event={"ID":"b53097b1-3126-4e17-b910-d2b7f57ec87e","Type":"ContainerDied","Data":"1f0cc3781d25c5f8bd9c9357cecd9de13b6028fe940551bf836d1e3a290ec1e9"} Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.903060 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-2kjcf" event={"ID":"b53097b1-3126-4e17-b910-d2b7f57ec87e","Type":"ContainerStarted","Data":"ff272d80ea1548840dad8361734bc6acbe33d378be633a438baedc9a06e7def9"} Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.915650 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-zwlkk" event={"ID":"37e65177-fd1b-4fee-aad0-e089b9e9c47b","Type":"ContainerStarted","Data":"336863bdbc03709a806fa2cf11974442dfc6e3c1fd6d53622cf4c0d96a560d9b"} Dec 13 06:49:01 crc kubenswrapper[5048]: I1213 06:49:01.923575 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-4fa6-account-create-update-wv7m6"] Dec 13 06:49:02 crc kubenswrapper[5048]: I1213 06:49:02.234795 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-5aa0-account-create-update-24bj7"] Dec 13 06:49:02 crc kubenswrapper[5048]: I1213 06:49:02.311181 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-hscms"] Dec 13 06:49:02 crc kubenswrapper[5048]: I1213 06:49:02.333752 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-b786-account-create-update-s8j9n"] Dec 13 06:49:02 crc kubenswrapper[5048]: W1213 06:49:02.357053 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3f13f74e_d8ac_4cd4_a6f6_4797086d1f82.slice/crio-51c83ac14f84afd3c2badf65666ccaf3aad1e1ce1c9e5049fab84fa3d7cb29ec WatchSource:0}: Error finding container 51c83ac14f84afd3c2badf65666ccaf3aad1e1ce1c9e5049fab84fa3d7cb29ec: Status 404 returned error can't find the container with id 51c83ac14f84afd3c2badf65666ccaf3aad1e1ce1c9e5049fab84fa3d7cb29ec Dec 13 06:49:02 crc kubenswrapper[5048]: 
I1213 06:49:02.362376 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-wmwmb"] Dec 13 06:49:02 crc kubenswrapper[5048]: W1213 06:49:02.366036 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0f2d2a25_1fa1_4d52_9ecb_5efe9871ea94.slice/crio-58731c3054552ead09851a525a0b5dccab292a527d5580e6c31b9bc1a5bbc074 WatchSource:0}: Error finding container 58731c3054552ead09851a525a0b5dccab292a527d5580e6c31b9bc1a5bbc074: Status 404 returned error can't find the container with id 58731c3054552ead09851a525a0b5dccab292a527d5580e6c31b9bc1a5bbc074 Dec 13 06:49:02 crc kubenswrapper[5048]: I1213 06:49:02.940181 5048 generic.go:334] "Generic (PLEG): container finished" podID="56bc6cf2-a689-4bbe-a20e-beda8ebe0165" containerID="3070acb24c92f47da73bd14c2957f4daa8dc7e966234b03154e73a3445c788b1" exitCode=0 Dec 13 06:49:02 crc kubenswrapper[5048]: I1213 06:49:02.940545 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-4fa6-account-create-update-wv7m6" event={"ID":"56bc6cf2-a689-4bbe-a20e-beda8ebe0165","Type":"ContainerDied","Data":"3070acb24c92f47da73bd14c2957f4daa8dc7e966234b03154e73a3445c788b1"} Dec 13 06:49:02 crc kubenswrapper[5048]: I1213 06:49:02.940581 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-4fa6-account-create-update-wv7m6" event={"ID":"56bc6cf2-a689-4bbe-a20e-beda8ebe0165","Type":"ContainerStarted","Data":"8bf062f50dc792d3e4d8fb3d95284deb59fd608f9822f789fe1890963f6d902d"} Dec 13 06:49:02 crc kubenswrapper[5048]: I1213 06:49:02.949925 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-wmwmb" event={"ID":"0f2d2a25-1fa1-4d52-9ecb-5efe9871ea94","Type":"ContainerStarted","Data":"82442e432e821a4d252f2716b7c9b49086a27d00fb2c25035a6b3287adb9cee8"} Dec 13 06:49:02 crc kubenswrapper[5048]: I1213 06:49:02.949967 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-wmwmb" event={"ID":"0f2d2a25-1fa1-4d52-9ecb-5efe9871ea94","Type":"ContainerStarted","Data":"58731c3054552ead09851a525a0b5dccab292a527d5580e6c31b9bc1a5bbc074"} Dec 13 06:49:02 crc kubenswrapper[5048]: I1213 06:49:02.960818 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-5aa0-account-create-update-24bj7" event={"ID":"84147fa9-3a4b-467d-a9db-d8c4c3ff66e8","Type":"ContainerStarted","Data":"35c9b8fac43d8dd833b06d4173252abc50df46149293138c6f6b26d139e5980f"} Dec 13 06:49:02 crc kubenswrapper[5048]: I1213 06:49:02.960862 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-5aa0-account-create-update-24bj7" event={"ID":"84147fa9-3a4b-467d-a9db-d8c4c3ff66e8","Type":"ContainerStarted","Data":"6b054aa7a7a6e683d9869afe73ad98c98a3ee1a7217e9c8f1cae7da922e1eaef"} Dec 13 06:49:02 crc kubenswrapper[5048]: I1213 06:49:02.972212 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-hscms" event={"ID":"3f13f74e-d8ac-4cd4-a6f6-4797086d1f82","Type":"ContainerStarted","Data":"51c83ac14f84afd3c2badf65666ccaf3aad1e1ce1c9e5049fab84fa3d7cb29ec"} Dec 13 06:49:02 crc kubenswrapper[5048]: I1213 06:49:02.983136 5048 generic.go:334] "Generic (PLEG): container finished" podID="ef4c1d97-0727-448e-910c-762a2f36ca80" containerID="a2c4f6a3fb24cf8b9fff1bda7e6f4164423ce94e4e26cfe3cb7c82380aab60b1" exitCode=0 Dec 13 06:49:02 crc kubenswrapper[5048]: I1213 06:49:02.983218 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/cinder-db-create-c6qr4" event={"ID":"ef4c1d97-0727-448e-910c-762a2f36ca80","Type":"ContainerDied","Data":"a2c4f6a3fb24cf8b9fff1bda7e6f4164423ce94e4e26cfe3cb7c82380aab60b1"} Dec 13 06:49:02 crc kubenswrapper[5048]: I1213 06:49:02.987819 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-5aa0-account-create-update-24bj7" podStartSLOduration=1.987804196 podStartE2EDuration="1.987804196s" podCreationTimestamp="2025-12-13 06:49:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:49:02.9846557 +0000 UTC m=+1176.851250301" watchObservedRunningTime="2025-12-13 06:49:02.987804196 +0000 UTC m=+1176.854398767" Dec 13 06:49:02 crc kubenswrapper[5048]: I1213 06:49:02.995787 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-2kjcf" event={"ID":"b53097b1-3126-4e17-b910-d2b7f57ec87e","Type":"ContainerStarted","Data":"6a3d15e2b73d49faf7e2bc2dde00bdf8e7146833acddabd1d2461771ec1f3cd7"} Dec 13 06:49:02 crc kubenswrapper[5048]: I1213 06:49:02.996573 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c79d794d7-2kjcf" Dec 13 06:49:03 crc kubenswrapper[5048]: I1213 06:49:03.016694 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-b786-account-create-update-s8j9n" event={"ID":"a786107a-8b19-4bbd-9049-1d4590029976","Type":"ContainerStarted","Data":"27fbcde69ec116cfdf5baaf699c29cca5ef0f26fb9d19add532f43b2389a8475"} Dec 13 06:49:03 crc kubenswrapper[5048]: I1213 06:49:03.016742 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-b786-account-create-update-s8j9n" event={"ID":"a786107a-8b19-4bbd-9049-1d4590029976","Type":"ContainerStarted","Data":"754497789ccc1b794275333290962a57127a518f1349e2cd1a81aa948721e3ae"} Dec 13 06:49:03 crc kubenswrapper[5048]: I1213 06:49:03.025959 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-create-wmwmb" podStartSLOduration=2.025941875 podStartE2EDuration="2.025941875s" podCreationTimestamp="2025-12-13 06:49:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:49:03.023503358 +0000 UTC m=+1176.890097959" watchObservedRunningTime="2025-12-13 06:49:03.025941875 +0000 UTC m=+1176.892536456" Dec 13 06:49:03 crc kubenswrapper[5048]: I1213 06:49:03.035689 5048 generic.go:334] "Generic (PLEG): container finished" podID="37e65177-fd1b-4fee-aad0-e089b9e9c47b" containerID="7bc793e1a64b123b2edbf3651f7addc018f948f75e8ca0093d09fc5b99a1b465" exitCode=0 Dec 13 06:49:03 crc kubenswrapper[5048]: I1213 06:49:03.035751 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-zwlkk" event={"ID":"37e65177-fd1b-4fee-aad0-e089b9e9c47b","Type":"ContainerDied","Data":"7bc793e1a64b123b2edbf3651f7addc018f948f75e8ca0093d09fc5b99a1b465"} Dec 13 06:49:03 crc kubenswrapper[5048]: I1213 06:49:03.103679 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5c79d794d7-2kjcf" podStartSLOduration=3.103663834 podStartE2EDuration="3.103663834s" podCreationTimestamp="2025-12-13 06:49:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:49:03.07566237 +0000 UTC m=+1176.942256951" 
watchObservedRunningTime="2025-12-13 06:49:03.103663834 +0000 UTC m=+1176.970258415" Dec 13 06:49:03 crc kubenswrapper[5048]: I1213 06:49:03.159426 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-b786-account-create-update-s8j9n" podStartSLOduration=2.159408362 podStartE2EDuration="2.159408362s" podCreationTimestamp="2025-12-13 06:49:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:49:03.139674254 +0000 UTC m=+1177.006268835" watchObservedRunningTime="2025-12-13 06:49:03.159408362 +0000 UTC m=+1177.026002943" Dec 13 06:49:04 crc kubenswrapper[5048]: I1213 06:49:04.049041 5048 generic.go:334] "Generic (PLEG): container finished" podID="a786107a-8b19-4bbd-9049-1d4590029976" containerID="27fbcde69ec116cfdf5baaf699c29cca5ef0f26fb9d19add532f43b2389a8475" exitCode=0 Dec 13 06:49:04 crc kubenswrapper[5048]: I1213 06:49:04.049256 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-b786-account-create-update-s8j9n" event={"ID":"a786107a-8b19-4bbd-9049-1d4590029976","Type":"ContainerDied","Data":"27fbcde69ec116cfdf5baaf699c29cca5ef0f26fb9d19add532f43b2389a8475"} Dec 13 06:49:04 crc kubenswrapper[5048]: I1213 06:49:04.051349 5048 generic.go:334] "Generic (PLEG): container finished" podID="0f2d2a25-1fa1-4d52-9ecb-5efe9871ea94" containerID="82442e432e821a4d252f2716b7c9b49086a27d00fb2c25035a6b3287adb9cee8" exitCode=0 Dec 13 06:49:04 crc kubenswrapper[5048]: I1213 06:49:04.051398 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-wmwmb" event={"ID":"0f2d2a25-1fa1-4d52-9ecb-5efe9871ea94","Type":"ContainerDied","Data":"82442e432e821a4d252f2716b7c9b49086a27d00fb2c25035a6b3287adb9cee8"} Dec 13 06:49:04 crc kubenswrapper[5048]: I1213 06:49:04.053243 5048 generic.go:334] "Generic (PLEG): container finished" podID="84147fa9-3a4b-467d-a9db-d8c4c3ff66e8" containerID="35c9b8fac43d8dd833b06d4173252abc50df46149293138c6f6b26d139e5980f" exitCode=0 Dec 13 06:49:04 crc kubenswrapper[5048]: I1213 06:49:04.053310 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-5aa0-account-create-update-24bj7" event={"ID":"84147fa9-3a4b-467d-a9db-d8c4c3ff66e8","Type":"ContainerDied","Data":"35c9b8fac43d8dd833b06d4173252abc50df46149293138c6f6b26d139e5980f"} Dec 13 06:49:04 crc kubenswrapper[5048]: I1213 06:49:04.055547 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-hgfjd" event={"ID":"91aa9cb1-7cd0-4cd3-9918-460b5d976ab7","Type":"ContainerStarted","Data":"ef7f01bdd05b3ad43ec1c9d4430e36471ec8d83f92a657215c6eef5ee1d95ccd"} Dec 13 06:49:04 crc kubenswrapper[5048]: I1213 06:49:04.093108 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-hgfjd" podStartSLOduration=2.062291257 podStartE2EDuration="35.093089694s" podCreationTimestamp="2025-12-13 06:48:29 +0000 UTC" firstStartedPulling="2025-12-13 06:48:30.00476825 +0000 UTC m=+1143.871362831" lastFinishedPulling="2025-12-13 06:49:03.035566687 +0000 UTC m=+1176.902161268" observedRunningTime="2025-12-13 06:49:04.08300729 +0000 UTC m=+1177.949601891" watchObservedRunningTime="2025-12-13 06:49:04.093089694 +0000 UTC m=+1177.959684275" Dec 13 06:49:04 crc kubenswrapper[5048]: I1213 06:49:04.508796 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-c6qr4" Dec 13 06:49:04 crc kubenswrapper[5048]: I1213 06:49:04.521414 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-zwlkk" Dec 13 06:49:04 crc kubenswrapper[5048]: I1213 06:49:04.535269 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-4fa6-account-create-update-wv7m6" Dec 13 06:49:04 crc kubenswrapper[5048]: I1213 06:49:04.591485 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9cks5\" (UniqueName: \"kubernetes.io/projected/ef4c1d97-0727-448e-910c-762a2f36ca80-kube-api-access-9cks5\") pod \"ef4c1d97-0727-448e-910c-762a2f36ca80\" (UID: \"ef4c1d97-0727-448e-910c-762a2f36ca80\") " Dec 13 06:49:04 crc kubenswrapper[5048]: I1213 06:49:04.591598 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gtcc2\" (UniqueName: \"kubernetes.io/projected/56bc6cf2-a689-4bbe-a20e-beda8ebe0165-kube-api-access-gtcc2\") pod \"56bc6cf2-a689-4bbe-a20e-beda8ebe0165\" (UID: \"56bc6cf2-a689-4bbe-a20e-beda8ebe0165\") " Dec 13 06:49:04 crc kubenswrapper[5048]: I1213 06:49:04.591626 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37e65177-fd1b-4fee-aad0-e089b9e9c47b-operator-scripts\") pod \"37e65177-fd1b-4fee-aad0-e089b9e9c47b\" (UID: \"37e65177-fd1b-4fee-aad0-e089b9e9c47b\") " Dec 13 06:49:04 crc kubenswrapper[5048]: I1213 06:49:04.591680 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ef4c1d97-0727-448e-910c-762a2f36ca80-operator-scripts\") pod \"ef4c1d97-0727-448e-910c-762a2f36ca80\" (UID: \"ef4c1d97-0727-448e-910c-762a2f36ca80\") " Dec 13 06:49:04 crc kubenswrapper[5048]: I1213 06:49:04.591749 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/56bc6cf2-a689-4bbe-a20e-beda8ebe0165-operator-scripts\") pod \"56bc6cf2-a689-4bbe-a20e-beda8ebe0165\" (UID: \"56bc6cf2-a689-4bbe-a20e-beda8ebe0165\") " Dec 13 06:49:04 crc kubenswrapper[5048]: I1213 06:49:04.591800 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9wr4b\" (UniqueName: \"kubernetes.io/projected/37e65177-fd1b-4fee-aad0-e089b9e9c47b-kube-api-access-9wr4b\") pod \"37e65177-fd1b-4fee-aad0-e089b9e9c47b\" (UID: \"37e65177-fd1b-4fee-aad0-e089b9e9c47b\") " Dec 13 06:49:04 crc kubenswrapper[5048]: I1213 06:49:04.592549 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/37e65177-fd1b-4fee-aad0-e089b9e9c47b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "37e65177-fd1b-4fee-aad0-e089b9e9c47b" (UID: "37e65177-fd1b-4fee-aad0-e089b9e9c47b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:49:04 crc kubenswrapper[5048]: I1213 06:49:04.592543 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/56bc6cf2-a689-4bbe-a20e-beda8ebe0165-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "56bc6cf2-a689-4bbe-a20e-beda8ebe0165" (UID: "56bc6cf2-a689-4bbe-a20e-beda8ebe0165"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:49:04 crc kubenswrapper[5048]: I1213 06:49:04.593067 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef4c1d97-0727-448e-910c-762a2f36ca80-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ef4c1d97-0727-448e-910c-762a2f36ca80" (UID: "ef4c1d97-0727-448e-910c-762a2f36ca80"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:49:04 crc kubenswrapper[5048]: I1213 06:49:04.593489 5048 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37e65177-fd1b-4fee-aad0-e089b9e9c47b-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:04 crc kubenswrapper[5048]: I1213 06:49:04.593515 5048 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ef4c1d97-0727-448e-910c-762a2f36ca80-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:04 crc kubenswrapper[5048]: I1213 06:49:04.593529 5048 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/56bc6cf2-a689-4bbe-a20e-beda8ebe0165-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:04 crc kubenswrapper[5048]: I1213 06:49:04.603029 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef4c1d97-0727-448e-910c-762a2f36ca80-kube-api-access-9cks5" (OuterVolumeSpecName: "kube-api-access-9cks5") pod "ef4c1d97-0727-448e-910c-762a2f36ca80" (UID: "ef4c1d97-0727-448e-910c-762a2f36ca80"). InnerVolumeSpecName "kube-api-access-9cks5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:49:04 crc kubenswrapper[5048]: I1213 06:49:04.600800 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37e65177-fd1b-4fee-aad0-e089b9e9c47b-kube-api-access-9wr4b" (OuterVolumeSpecName: "kube-api-access-9wr4b") pod "37e65177-fd1b-4fee-aad0-e089b9e9c47b" (UID: "37e65177-fd1b-4fee-aad0-e089b9e9c47b"). InnerVolumeSpecName "kube-api-access-9wr4b". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:49:04 crc kubenswrapper[5048]: I1213 06:49:04.620686 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/56bc6cf2-a689-4bbe-a20e-beda8ebe0165-kube-api-access-gtcc2" (OuterVolumeSpecName: "kube-api-access-gtcc2") pod "56bc6cf2-a689-4bbe-a20e-beda8ebe0165" (UID: "56bc6cf2-a689-4bbe-a20e-beda8ebe0165"). InnerVolumeSpecName "kube-api-access-gtcc2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:49:04 crc kubenswrapper[5048]: I1213 06:49:04.695060 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9cks5\" (UniqueName: \"kubernetes.io/projected/ef4c1d97-0727-448e-910c-762a2f36ca80-kube-api-access-9cks5\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:04 crc kubenswrapper[5048]: I1213 06:49:04.695096 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gtcc2\" (UniqueName: \"kubernetes.io/projected/56bc6cf2-a689-4bbe-a20e-beda8ebe0165-kube-api-access-gtcc2\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:04 crc kubenswrapper[5048]: I1213 06:49:04.695105 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9wr4b\" (UniqueName: \"kubernetes.io/projected/37e65177-fd1b-4fee-aad0-e089b9e9c47b-kube-api-access-9wr4b\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:05 crc kubenswrapper[5048]: I1213 06:49:05.066604 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-4fa6-account-create-update-wv7m6" event={"ID":"56bc6cf2-a689-4bbe-a20e-beda8ebe0165","Type":"ContainerDied","Data":"8bf062f50dc792d3e4d8fb3d95284deb59fd608f9822f789fe1890963f6d902d"} Dec 13 06:49:05 crc kubenswrapper[5048]: I1213 06:49:05.066657 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8bf062f50dc792d3e4d8fb3d95284deb59fd608f9822f789fe1890963f6d902d" Dec 13 06:49:05 crc kubenswrapper[5048]: I1213 06:49:05.066719 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-4fa6-account-create-update-wv7m6" Dec 13 06:49:05 crc kubenswrapper[5048]: I1213 06:49:05.069079 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-c6qr4" event={"ID":"ef4c1d97-0727-448e-910c-762a2f36ca80","Type":"ContainerDied","Data":"1e1c6e6bcc46e123538328ed93a7267fa5e2c3336d8794cd0798bda3fe0d587b"} Dec 13 06:49:05 crc kubenswrapper[5048]: I1213 06:49:05.069106 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-c6qr4" Dec 13 06:49:05 crc kubenswrapper[5048]: I1213 06:49:05.069119 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1e1c6e6bcc46e123538328ed93a7267fa5e2c3336d8794cd0798bda3fe0d587b" Dec 13 06:49:05 crc kubenswrapper[5048]: I1213 06:49:05.072910 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-zwlkk" Dec 13 06:49:05 crc kubenswrapper[5048]: I1213 06:49:05.076184 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-zwlkk" event={"ID":"37e65177-fd1b-4fee-aad0-e089b9e9c47b","Type":"ContainerDied","Data":"336863bdbc03709a806fa2cf11974442dfc6e3c1fd6d53622cf4c0d96a560d9b"} Dec 13 06:49:05 crc kubenswrapper[5048]: I1213 06:49:05.076775 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="336863bdbc03709a806fa2cf11974442dfc6e3c1fd6d53622cf4c0d96a560d9b" Dec 13 06:49:07 crc kubenswrapper[5048]: I1213 06:49:07.983463 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-b786-account-create-update-s8j9n" Dec 13 06:49:07 crc kubenswrapper[5048]: I1213 06:49:07.988707 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-wmwmb" Dec 13 06:49:07 crc kubenswrapper[5048]: I1213 06:49:07.994971 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-5aa0-account-create-update-24bj7" Dec 13 06:49:08 crc kubenswrapper[5048]: I1213 06:49:08.057179 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jlt7s\" (UniqueName: \"kubernetes.io/projected/a786107a-8b19-4bbd-9049-1d4590029976-kube-api-access-jlt7s\") pod \"a786107a-8b19-4bbd-9049-1d4590029976\" (UID: \"a786107a-8b19-4bbd-9049-1d4590029976\") " Dec 13 06:49:08 crc kubenswrapper[5048]: I1213 06:49:08.057269 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0f2d2a25-1fa1-4d52-9ecb-5efe9871ea94-operator-scripts\") pod \"0f2d2a25-1fa1-4d52-9ecb-5efe9871ea94\" (UID: \"0f2d2a25-1fa1-4d52-9ecb-5efe9871ea94\") " Dec 13 06:49:08 crc kubenswrapper[5048]: I1213 06:49:08.057314 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jdhcz\" (UniqueName: \"kubernetes.io/projected/0f2d2a25-1fa1-4d52-9ecb-5efe9871ea94-kube-api-access-jdhcz\") pod \"0f2d2a25-1fa1-4d52-9ecb-5efe9871ea94\" (UID: \"0f2d2a25-1fa1-4d52-9ecb-5efe9871ea94\") " Dec 13 06:49:08 crc kubenswrapper[5048]: I1213 06:49:08.057358 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/84147fa9-3a4b-467d-a9db-d8c4c3ff66e8-operator-scripts\") pod \"84147fa9-3a4b-467d-a9db-d8c4c3ff66e8\" (UID: \"84147fa9-3a4b-467d-a9db-d8c4c3ff66e8\") " Dec 13 06:49:08 crc kubenswrapper[5048]: I1213 06:49:08.057395 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a786107a-8b19-4bbd-9049-1d4590029976-operator-scripts\") pod \"a786107a-8b19-4bbd-9049-1d4590029976\" (UID: \"a786107a-8b19-4bbd-9049-1d4590029976\") " Dec 13 06:49:08 crc kubenswrapper[5048]: I1213 06:49:08.057555 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-klfr7\" (UniqueName: \"kubernetes.io/projected/84147fa9-3a4b-467d-a9db-d8c4c3ff66e8-kube-api-access-klfr7\") pod \"84147fa9-3a4b-467d-a9db-d8c4c3ff66e8\" (UID: \"84147fa9-3a4b-467d-a9db-d8c4c3ff66e8\") " Dec 13 06:49:08 crc kubenswrapper[5048]: I1213 06:49:08.058190 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84147fa9-3a4b-467d-a9db-d8c4c3ff66e8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "84147fa9-3a4b-467d-a9db-d8c4c3ff66e8" (UID: "84147fa9-3a4b-467d-a9db-d8c4c3ff66e8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:49:08 crc kubenswrapper[5048]: I1213 06:49:08.058302 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f2d2a25-1fa1-4d52-9ecb-5efe9871ea94-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0f2d2a25-1fa1-4d52-9ecb-5efe9871ea94" (UID: "0f2d2a25-1fa1-4d52-9ecb-5efe9871ea94"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:49:08 crc kubenswrapper[5048]: I1213 06:49:08.060835 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a786107a-8b19-4bbd-9049-1d4590029976-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a786107a-8b19-4bbd-9049-1d4590029976" (UID: "a786107a-8b19-4bbd-9049-1d4590029976"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:49:08 crc kubenswrapper[5048]: I1213 06:49:08.064130 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a786107a-8b19-4bbd-9049-1d4590029976-kube-api-access-jlt7s" (OuterVolumeSpecName: "kube-api-access-jlt7s") pod "a786107a-8b19-4bbd-9049-1d4590029976" (UID: "a786107a-8b19-4bbd-9049-1d4590029976"). InnerVolumeSpecName "kube-api-access-jlt7s". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:49:08 crc kubenswrapper[5048]: I1213 06:49:08.066027 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f2d2a25-1fa1-4d52-9ecb-5efe9871ea94-kube-api-access-jdhcz" (OuterVolumeSpecName: "kube-api-access-jdhcz") pod "0f2d2a25-1fa1-4d52-9ecb-5efe9871ea94" (UID: "0f2d2a25-1fa1-4d52-9ecb-5efe9871ea94"). InnerVolumeSpecName "kube-api-access-jdhcz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:49:08 crc kubenswrapper[5048]: I1213 06:49:08.066696 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84147fa9-3a4b-467d-a9db-d8c4c3ff66e8-kube-api-access-klfr7" (OuterVolumeSpecName: "kube-api-access-klfr7") pod "84147fa9-3a4b-467d-a9db-d8c4c3ff66e8" (UID: "84147fa9-3a4b-467d-a9db-d8c4c3ff66e8"). InnerVolumeSpecName "kube-api-access-klfr7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:49:08 crc kubenswrapper[5048]: I1213 06:49:08.109309 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-wmwmb" event={"ID":"0f2d2a25-1fa1-4d52-9ecb-5efe9871ea94","Type":"ContainerDied","Data":"58731c3054552ead09851a525a0b5dccab292a527d5580e6c31b9bc1a5bbc074"} Dec 13 06:49:08 crc kubenswrapper[5048]: I1213 06:49:08.110386 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="58731c3054552ead09851a525a0b5dccab292a527d5580e6c31b9bc1a5bbc074" Dec 13 06:49:08 crc kubenswrapper[5048]: I1213 06:49:08.110686 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-wmwmb" Dec 13 06:49:08 crc kubenswrapper[5048]: I1213 06:49:08.113161 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-5aa0-account-create-update-24bj7" event={"ID":"84147fa9-3a4b-467d-a9db-d8c4c3ff66e8","Type":"ContainerDied","Data":"6b054aa7a7a6e683d9869afe73ad98c98a3ee1a7217e9c8f1cae7da922e1eaef"} Dec 13 06:49:08 crc kubenswrapper[5048]: I1213 06:49:08.113208 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6b054aa7a7a6e683d9869afe73ad98c98a3ee1a7217e9c8f1cae7da922e1eaef" Dec 13 06:49:08 crc kubenswrapper[5048]: I1213 06:49:08.113313 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-5aa0-account-create-update-24bj7" Dec 13 06:49:08 crc kubenswrapper[5048]: I1213 06:49:08.115688 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-b786-account-create-update-s8j9n" event={"ID":"a786107a-8b19-4bbd-9049-1d4590029976","Type":"ContainerDied","Data":"754497789ccc1b794275333290962a57127a518f1349e2cd1a81aa948721e3ae"} Dec 13 06:49:08 crc kubenswrapper[5048]: I1213 06:49:08.115723 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="754497789ccc1b794275333290962a57127a518f1349e2cd1a81aa948721e3ae" Dec 13 06:49:08 crc kubenswrapper[5048]: I1213 06:49:08.115791 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-b786-account-create-update-s8j9n" Dec 13 06:49:08 crc kubenswrapper[5048]: I1213 06:49:08.159336 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-klfr7\" (UniqueName: \"kubernetes.io/projected/84147fa9-3a4b-467d-a9db-d8c4c3ff66e8-kube-api-access-klfr7\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:08 crc kubenswrapper[5048]: I1213 06:49:08.159379 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jlt7s\" (UniqueName: \"kubernetes.io/projected/a786107a-8b19-4bbd-9049-1d4590029976-kube-api-access-jlt7s\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:08 crc kubenswrapper[5048]: I1213 06:49:08.159390 5048 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0f2d2a25-1fa1-4d52-9ecb-5efe9871ea94-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:08 crc kubenswrapper[5048]: I1213 06:49:08.159399 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jdhcz\" (UniqueName: \"kubernetes.io/projected/0f2d2a25-1fa1-4d52-9ecb-5efe9871ea94-kube-api-access-jdhcz\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:08 crc kubenswrapper[5048]: I1213 06:49:08.159410 5048 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/84147fa9-3a4b-467d-a9db-d8c4c3ff66e8-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:08 crc kubenswrapper[5048]: I1213 06:49:08.159419 5048 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a786107a-8b19-4bbd-9049-1d4590029976-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:09 crc kubenswrapper[5048]: I1213 06:49:09.125740 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-hscms" event={"ID":"3f13f74e-d8ac-4cd4-a6f6-4797086d1f82","Type":"ContainerStarted","Data":"2d144f7ad2976b897eb79a7c1f0dda93d8476a625581e9c46d6e945dc2365ac3"} Dec 13 06:49:09 crc kubenswrapper[5048]: I1213 06:49:09.158529 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-hscms" podStartSLOduration=2.515164736 podStartE2EDuration="8.158506665s" podCreationTimestamp="2025-12-13 06:49:01 +0000 UTC" firstStartedPulling="2025-12-13 06:49:02.378850322 +0000 UTC m=+1176.245444903" lastFinishedPulling="2025-12-13 06:49:08.022192251 +0000 UTC m=+1181.888786832" observedRunningTime="2025-12-13 06:49:09.154419104 +0000 UTC m=+1183.021013695" watchObservedRunningTime="2025-12-13 06:49:09.158506665 +0000 UTC m=+1183.025101246" Dec 13 06:49:10 crc kubenswrapper[5048]: I1213 06:49:10.538213 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/dnsmasq-dns-5c79d794d7-2kjcf" Dec 13 06:49:10 crc kubenswrapper[5048]: I1213 06:49:10.599819 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-j7782"] Dec 13 06:49:10 crc kubenswrapper[5048]: I1213 06:49:10.600127 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-b8fbc5445-j7782" podUID="73c1e78c-8d3f-45d1-976e-284ae877919e" containerName="dnsmasq-dns" containerID="cri-o://0f3e8e0048d88d3438413b9205bc74cd2cefd5c1ecd653dcb34749470109e453" gracePeriod=10 Dec 13 06:49:11 crc kubenswrapper[5048]: I1213 06:49:11.066674 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-j7782" Dec 13 06:49:11 crc kubenswrapper[5048]: I1213 06:49:11.108591 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/73c1e78c-8d3f-45d1-976e-284ae877919e-config\") pod \"73c1e78c-8d3f-45d1-976e-284ae877919e\" (UID: \"73c1e78c-8d3f-45d1-976e-284ae877919e\") " Dec 13 06:49:11 crc kubenswrapper[5048]: I1213 06:49:11.108673 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/73c1e78c-8d3f-45d1-976e-284ae877919e-dns-svc\") pod \"73c1e78c-8d3f-45d1-976e-284ae877919e\" (UID: \"73c1e78c-8d3f-45d1-976e-284ae877919e\") " Dec 13 06:49:11 crc kubenswrapper[5048]: I1213 06:49:11.108764 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/73c1e78c-8d3f-45d1-976e-284ae877919e-ovsdbserver-nb\") pod \"73c1e78c-8d3f-45d1-976e-284ae877919e\" (UID: \"73c1e78c-8d3f-45d1-976e-284ae877919e\") " Dec 13 06:49:11 crc kubenswrapper[5048]: I1213 06:49:11.108839 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/73c1e78c-8d3f-45d1-976e-284ae877919e-ovsdbserver-sb\") pod \"73c1e78c-8d3f-45d1-976e-284ae877919e\" (UID: \"73c1e78c-8d3f-45d1-976e-284ae877919e\") " Dec 13 06:49:11 crc kubenswrapper[5048]: I1213 06:49:11.109107 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cb9tr\" (UniqueName: \"kubernetes.io/projected/73c1e78c-8d3f-45d1-976e-284ae877919e-kube-api-access-cb9tr\") pod \"73c1e78c-8d3f-45d1-976e-284ae877919e\" (UID: \"73c1e78c-8d3f-45d1-976e-284ae877919e\") " Dec 13 06:49:11 crc kubenswrapper[5048]: I1213 06:49:11.117636 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/73c1e78c-8d3f-45d1-976e-284ae877919e-kube-api-access-cb9tr" (OuterVolumeSpecName: "kube-api-access-cb9tr") pod "73c1e78c-8d3f-45d1-976e-284ae877919e" (UID: "73c1e78c-8d3f-45d1-976e-284ae877919e"). InnerVolumeSpecName "kube-api-access-cb9tr". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:49:11 crc kubenswrapper[5048]: I1213 06:49:11.151219 5048 generic.go:334] "Generic (PLEG): container finished" podID="73c1e78c-8d3f-45d1-976e-284ae877919e" containerID="0f3e8e0048d88d3438413b9205bc74cd2cefd5c1ecd653dcb34749470109e453" exitCode=0 Dec 13 06:49:11 crc kubenswrapper[5048]: I1213 06:49:11.151301 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-j7782" Dec 13 06:49:11 crc kubenswrapper[5048]: I1213 06:49:11.151315 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-j7782" event={"ID":"73c1e78c-8d3f-45d1-976e-284ae877919e","Type":"ContainerDied","Data":"0f3e8e0048d88d3438413b9205bc74cd2cefd5c1ecd653dcb34749470109e453"} Dec 13 06:49:11 crc kubenswrapper[5048]: I1213 06:49:11.152004 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-j7782" event={"ID":"73c1e78c-8d3f-45d1-976e-284ae877919e","Type":"ContainerDied","Data":"d77c7adab7b6d0102b976b681ba67ec52799e87c8dc5b5d5866e4e7e24f09be0"} Dec 13 06:49:11 crc kubenswrapper[5048]: I1213 06:49:11.152027 5048 scope.go:117] "RemoveContainer" containerID="0f3e8e0048d88d3438413b9205bc74cd2cefd5c1ecd653dcb34749470109e453" Dec 13 06:49:11 crc kubenswrapper[5048]: I1213 06:49:11.159929 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/73c1e78c-8d3f-45d1-976e-284ae877919e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "73c1e78c-8d3f-45d1-976e-284ae877919e" (UID: "73c1e78c-8d3f-45d1-976e-284ae877919e"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:49:11 crc kubenswrapper[5048]: I1213 06:49:11.166346 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/73c1e78c-8d3f-45d1-976e-284ae877919e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "73c1e78c-8d3f-45d1-976e-284ae877919e" (UID: "73c1e78c-8d3f-45d1-976e-284ae877919e"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:49:11 crc kubenswrapper[5048]: I1213 06:49:11.169361 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/73c1e78c-8d3f-45d1-976e-284ae877919e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "73c1e78c-8d3f-45d1-976e-284ae877919e" (UID: "73c1e78c-8d3f-45d1-976e-284ae877919e"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:49:11 crc kubenswrapper[5048]: I1213 06:49:11.180914 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/73c1e78c-8d3f-45d1-976e-284ae877919e-config" (OuterVolumeSpecName: "config") pod "73c1e78c-8d3f-45d1-976e-284ae877919e" (UID: "73c1e78c-8d3f-45d1-976e-284ae877919e"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:49:11 crc kubenswrapper[5048]: I1213 06:49:11.210638 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cb9tr\" (UniqueName: \"kubernetes.io/projected/73c1e78c-8d3f-45d1-976e-284ae877919e-kube-api-access-cb9tr\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:11 crc kubenswrapper[5048]: I1213 06:49:11.210671 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/73c1e78c-8d3f-45d1-976e-284ae877919e-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:11 crc kubenswrapper[5048]: I1213 06:49:11.210681 5048 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/73c1e78c-8d3f-45d1-976e-284ae877919e-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:11 crc kubenswrapper[5048]: I1213 06:49:11.210692 5048 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/73c1e78c-8d3f-45d1-976e-284ae877919e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:11 crc kubenswrapper[5048]: I1213 06:49:11.210702 5048 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/73c1e78c-8d3f-45d1-976e-284ae877919e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:11 crc kubenswrapper[5048]: I1213 06:49:11.223703 5048 scope.go:117] "RemoveContainer" containerID="283d10bcbd3be3474e0ad1377274a2d802168b7709ad649dff2c63f3dbb6e82e" Dec 13 06:49:11 crc kubenswrapper[5048]: I1213 06:49:11.249321 5048 scope.go:117] "RemoveContainer" containerID="0f3e8e0048d88d3438413b9205bc74cd2cefd5c1ecd653dcb34749470109e453" Dec 13 06:49:11 crc kubenswrapper[5048]: E1213 06:49:11.251061 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f3e8e0048d88d3438413b9205bc74cd2cefd5c1ecd653dcb34749470109e453\": container with ID starting with 0f3e8e0048d88d3438413b9205bc74cd2cefd5c1ecd653dcb34749470109e453 not found: ID does not exist" containerID="0f3e8e0048d88d3438413b9205bc74cd2cefd5c1ecd653dcb34749470109e453" Dec 13 06:49:11 crc kubenswrapper[5048]: I1213 06:49:11.251107 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f3e8e0048d88d3438413b9205bc74cd2cefd5c1ecd653dcb34749470109e453"} err="failed to get container status \"0f3e8e0048d88d3438413b9205bc74cd2cefd5c1ecd653dcb34749470109e453\": rpc error: code = NotFound desc = could not find container \"0f3e8e0048d88d3438413b9205bc74cd2cefd5c1ecd653dcb34749470109e453\": container with ID starting with 0f3e8e0048d88d3438413b9205bc74cd2cefd5c1ecd653dcb34749470109e453 not found: ID does not exist" Dec 13 06:49:11 crc kubenswrapper[5048]: I1213 06:49:11.251150 5048 scope.go:117] "RemoveContainer" containerID="283d10bcbd3be3474e0ad1377274a2d802168b7709ad649dff2c63f3dbb6e82e" Dec 13 06:49:11 crc kubenswrapper[5048]: E1213 06:49:11.251730 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"283d10bcbd3be3474e0ad1377274a2d802168b7709ad649dff2c63f3dbb6e82e\": container with ID starting with 283d10bcbd3be3474e0ad1377274a2d802168b7709ad649dff2c63f3dbb6e82e not found: ID does not exist" containerID="283d10bcbd3be3474e0ad1377274a2d802168b7709ad649dff2c63f3dbb6e82e" Dec 13 06:49:11 crc kubenswrapper[5048]: I1213 06:49:11.251750 5048 pod_container_deletor.go:53] "DeleteContainer 
returned error" containerID={"Type":"cri-o","ID":"283d10bcbd3be3474e0ad1377274a2d802168b7709ad649dff2c63f3dbb6e82e"} err="failed to get container status \"283d10bcbd3be3474e0ad1377274a2d802168b7709ad649dff2c63f3dbb6e82e\": rpc error: code = NotFound desc = could not find container \"283d10bcbd3be3474e0ad1377274a2d802168b7709ad649dff2c63f3dbb6e82e\": container with ID starting with 283d10bcbd3be3474e0ad1377274a2d802168b7709ad649dff2c63f3dbb6e82e not found: ID does not exist" Dec 13 06:49:11 crc kubenswrapper[5048]: I1213 06:49:11.484949 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-j7782"] Dec 13 06:49:11 crc kubenswrapper[5048]: I1213 06:49:11.490805 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-j7782"] Dec 13 06:49:12 crc kubenswrapper[5048]: I1213 06:49:12.160804 5048 generic.go:334] "Generic (PLEG): container finished" podID="91aa9cb1-7cd0-4cd3-9918-460b5d976ab7" containerID="ef7f01bdd05b3ad43ec1c9d4430e36471ec8d83f92a657215c6eef5ee1d95ccd" exitCode=0 Dec 13 06:49:12 crc kubenswrapper[5048]: I1213 06:49:12.161361 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-hgfjd" event={"ID":"91aa9cb1-7cd0-4cd3-9918-460b5d976ab7","Type":"ContainerDied","Data":"ef7f01bdd05b3ad43ec1c9d4430e36471ec8d83f92a657215c6eef5ee1d95ccd"} Dec 13 06:49:12 crc kubenswrapper[5048]: I1213 06:49:12.164385 5048 generic.go:334] "Generic (PLEG): container finished" podID="3f13f74e-d8ac-4cd4-a6f6-4797086d1f82" containerID="2d144f7ad2976b897eb79a7c1f0dda93d8476a625581e9c46d6e945dc2365ac3" exitCode=0 Dec 13 06:49:12 crc kubenswrapper[5048]: I1213 06:49:12.164446 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-hscms" event={"ID":"3f13f74e-d8ac-4cd4-a6f6-4797086d1f82","Type":"ContainerDied","Data":"2d144f7ad2976b897eb79a7c1f0dda93d8476a625581e9c46d6e945dc2365ac3"} Dec 13 06:49:12 crc kubenswrapper[5048]: I1213 06:49:12.576247 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="73c1e78c-8d3f-45d1-976e-284ae877919e" path="/var/lib/kubelet/pods/73c1e78c-8d3f-45d1-976e-284ae877919e/volumes" Dec 13 06:49:13 crc kubenswrapper[5048]: I1213 06:49:13.571191 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-hscms" Dec 13 06:49:13 crc kubenswrapper[5048]: I1213 06:49:13.578985 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-hgfjd" Dec 13 06:49:13 crc kubenswrapper[5048]: I1213 06:49:13.675793 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91aa9cb1-7cd0-4cd3-9918-460b5d976ab7-combined-ca-bundle\") pod \"91aa9cb1-7cd0-4cd3-9918-460b5d976ab7\" (UID: \"91aa9cb1-7cd0-4cd3-9918-460b5d976ab7\") " Dec 13 06:49:13 crc kubenswrapper[5048]: I1213 06:49:13.675833 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fhr4g\" (UniqueName: \"kubernetes.io/projected/3f13f74e-d8ac-4cd4-a6f6-4797086d1f82-kube-api-access-fhr4g\") pod \"3f13f74e-d8ac-4cd4-a6f6-4797086d1f82\" (UID: \"3f13f74e-d8ac-4cd4-a6f6-4797086d1f82\") " Dec 13 06:49:13 crc kubenswrapper[5048]: I1213 06:49:13.675920 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f13f74e-d8ac-4cd4-a6f6-4797086d1f82-config-data\") pod \"3f13f74e-d8ac-4cd4-a6f6-4797086d1f82\" (UID: \"3f13f74e-d8ac-4cd4-a6f6-4797086d1f82\") " Dec 13 06:49:13 crc kubenswrapper[5048]: I1213 06:49:13.675979 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/91aa9cb1-7cd0-4cd3-9918-460b5d976ab7-db-sync-config-data\") pod \"91aa9cb1-7cd0-4cd3-9918-460b5d976ab7\" (UID: \"91aa9cb1-7cd0-4cd3-9918-460b5d976ab7\") " Dec 13 06:49:13 crc kubenswrapper[5048]: I1213 06:49:13.676015 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9gjbh\" (UniqueName: \"kubernetes.io/projected/91aa9cb1-7cd0-4cd3-9918-460b5d976ab7-kube-api-access-9gjbh\") pod \"91aa9cb1-7cd0-4cd3-9918-460b5d976ab7\" (UID: \"91aa9cb1-7cd0-4cd3-9918-460b5d976ab7\") " Dec 13 06:49:13 crc kubenswrapper[5048]: I1213 06:49:13.676127 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91aa9cb1-7cd0-4cd3-9918-460b5d976ab7-config-data\") pod \"91aa9cb1-7cd0-4cd3-9918-460b5d976ab7\" (UID: \"91aa9cb1-7cd0-4cd3-9918-460b5d976ab7\") " Dec 13 06:49:13 crc kubenswrapper[5048]: I1213 06:49:13.676159 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f13f74e-d8ac-4cd4-a6f6-4797086d1f82-combined-ca-bundle\") pod \"3f13f74e-d8ac-4cd4-a6f6-4797086d1f82\" (UID: \"3f13f74e-d8ac-4cd4-a6f6-4797086d1f82\") " Dec 13 06:49:13 crc kubenswrapper[5048]: I1213 06:49:13.683821 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91aa9cb1-7cd0-4cd3-9918-460b5d976ab7-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "91aa9cb1-7cd0-4cd3-9918-460b5d976ab7" (UID: "91aa9cb1-7cd0-4cd3-9918-460b5d976ab7"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:49:13 crc kubenswrapper[5048]: I1213 06:49:13.683854 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/91aa9cb1-7cd0-4cd3-9918-460b5d976ab7-kube-api-access-9gjbh" (OuterVolumeSpecName: "kube-api-access-9gjbh") pod "91aa9cb1-7cd0-4cd3-9918-460b5d976ab7" (UID: "91aa9cb1-7cd0-4cd3-9918-460b5d976ab7"). InnerVolumeSpecName "kube-api-access-9gjbh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:49:13 crc kubenswrapper[5048]: I1213 06:49:13.695856 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f13f74e-d8ac-4cd4-a6f6-4797086d1f82-kube-api-access-fhr4g" (OuterVolumeSpecName: "kube-api-access-fhr4g") pod "3f13f74e-d8ac-4cd4-a6f6-4797086d1f82" (UID: "3f13f74e-d8ac-4cd4-a6f6-4797086d1f82"). InnerVolumeSpecName "kube-api-access-fhr4g". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:49:13 crc kubenswrapper[5048]: I1213 06:49:13.700427 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91aa9cb1-7cd0-4cd3-9918-460b5d976ab7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "91aa9cb1-7cd0-4cd3-9918-460b5d976ab7" (UID: "91aa9cb1-7cd0-4cd3-9918-460b5d976ab7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:49:13 crc kubenswrapper[5048]: I1213 06:49:13.706534 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f13f74e-d8ac-4cd4-a6f6-4797086d1f82-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3f13f74e-d8ac-4cd4-a6f6-4797086d1f82" (UID: "3f13f74e-d8ac-4cd4-a6f6-4797086d1f82"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:49:13 crc kubenswrapper[5048]: I1213 06:49:13.722135 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91aa9cb1-7cd0-4cd3-9918-460b5d976ab7-config-data" (OuterVolumeSpecName: "config-data") pod "91aa9cb1-7cd0-4cd3-9918-460b5d976ab7" (UID: "91aa9cb1-7cd0-4cd3-9918-460b5d976ab7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:49:13 crc kubenswrapper[5048]: I1213 06:49:13.722714 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f13f74e-d8ac-4cd4-a6f6-4797086d1f82-config-data" (OuterVolumeSpecName: "config-data") pod "3f13f74e-d8ac-4cd4-a6f6-4797086d1f82" (UID: "3f13f74e-d8ac-4cd4-a6f6-4797086d1f82"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:49:13 crc kubenswrapper[5048]: I1213 06:49:13.778750 5048 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/91aa9cb1-7cd0-4cd3-9918-460b5d976ab7-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:13 crc kubenswrapper[5048]: I1213 06:49:13.778827 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9gjbh\" (UniqueName: \"kubernetes.io/projected/91aa9cb1-7cd0-4cd3-9918-460b5d976ab7-kube-api-access-9gjbh\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:13 crc kubenswrapper[5048]: I1213 06:49:13.778845 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91aa9cb1-7cd0-4cd3-9918-460b5d976ab7-config-data\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:13 crc kubenswrapper[5048]: I1213 06:49:13.778859 5048 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f13f74e-d8ac-4cd4-a6f6-4797086d1f82-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:13 crc kubenswrapper[5048]: I1213 06:49:13.778873 5048 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91aa9cb1-7cd0-4cd3-9918-460b5d976ab7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:13 crc kubenswrapper[5048]: I1213 06:49:13.778885 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fhr4g\" (UniqueName: \"kubernetes.io/projected/3f13f74e-d8ac-4cd4-a6f6-4797086d1f82-kube-api-access-fhr4g\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:13 crc kubenswrapper[5048]: I1213 06:49:13.778897 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f13f74e-d8ac-4cd4-a6f6-4797086d1f82-config-data\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.182319 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-hscms" event={"ID":"3f13f74e-d8ac-4cd4-a6f6-4797086d1f82","Type":"ContainerDied","Data":"51c83ac14f84afd3c2badf65666ccaf3aad1e1ce1c9e5049fab84fa3d7cb29ec"} Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.182358 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="51c83ac14f84afd3c2badf65666ccaf3aad1e1ce1c9e5049fab84fa3d7cb29ec" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.182548 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-hscms" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.190291 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-hgfjd" event={"ID":"91aa9cb1-7cd0-4cd3-9918-460b5d976ab7","Type":"ContainerDied","Data":"9d2e30435ff821a510b6e99fb8d4c8fa90bef6274ef2c9c9277762fdfc69ae39"} Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.190340 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9d2e30435ff821a510b6e99fb8d4c8fa90bef6274ef2c9c9277762fdfc69ae39" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.190344 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-hgfjd" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.482408 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5b868669f-4tc86"] Dec 13 06:49:14 crc kubenswrapper[5048]: E1213 06:49:14.482768 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f13f74e-d8ac-4cd4-a6f6-4797086d1f82" containerName="keystone-db-sync" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.482779 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f13f74e-d8ac-4cd4-a6f6-4797086d1f82" containerName="keystone-db-sync" Dec 13 06:49:14 crc kubenswrapper[5048]: E1213 06:49:14.482790 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f2d2a25-1fa1-4d52-9ecb-5efe9871ea94" containerName="mariadb-database-create" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.482797 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f2d2a25-1fa1-4d52-9ecb-5efe9871ea94" containerName="mariadb-database-create" Dec 13 06:49:14 crc kubenswrapper[5048]: E1213 06:49:14.482806 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73c1e78c-8d3f-45d1-976e-284ae877919e" containerName="dnsmasq-dns" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.482813 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="73c1e78c-8d3f-45d1-976e-284ae877919e" containerName="dnsmasq-dns" Dec 13 06:49:14 crc kubenswrapper[5048]: E1213 06:49:14.482839 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84147fa9-3a4b-467d-a9db-d8c4c3ff66e8" containerName="mariadb-account-create-update" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.482845 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="84147fa9-3a4b-467d-a9db-d8c4c3ff66e8" containerName="mariadb-account-create-update" Dec 13 06:49:14 crc kubenswrapper[5048]: E1213 06:49:14.482860 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91aa9cb1-7cd0-4cd3-9918-460b5d976ab7" containerName="glance-db-sync" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.482866 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="91aa9cb1-7cd0-4cd3-9918-460b5d976ab7" containerName="glance-db-sync" Dec 13 06:49:14 crc kubenswrapper[5048]: E1213 06:49:14.482881 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73c1e78c-8d3f-45d1-976e-284ae877919e" containerName="init" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.482887 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="73c1e78c-8d3f-45d1-976e-284ae877919e" containerName="init" Dec 13 06:49:14 crc kubenswrapper[5048]: E1213 06:49:14.482895 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56bc6cf2-a689-4bbe-a20e-beda8ebe0165" containerName="mariadb-account-create-update" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.482901 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="56bc6cf2-a689-4bbe-a20e-beda8ebe0165" containerName="mariadb-account-create-update" Dec 13 06:49:14 crc kubenswrapper[5048]: E1213 06:49:14.482913 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef4c1d97-0727-448e-910c-762a2f36ca80" containerName="mariadb-database-create" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.482919 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef4c1d97-0727-448e-910c-762a2f36ca80" containerName="mariadb-database-create" Dec 13 06:49:14 crc kubenswrapper[5048]: E1213 06:49:14.482927 5048 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="37e65177-fd1b-4fee-aad0-e089b9e9c47b" containerName="mariadb-database-create" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.482933 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="37e65177-fd1b-4fee-aad0-e089b9e9c47b" containerName="mariadb-database-create" Dec 13 06:49:14 crc kubenswrapper[5048]: E1213 06:49:14.482942 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a786107a-8b19-4bbd-9049-1d4590029976" containerName="mariadb-account-create-update" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.482947 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="a786107a-8b19-4bbd-9049-1d4590029976" containerName="mariadb-account-create-update" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.483092 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="37e65177-fd1b-4fee-aad0-e089b9e9c47b" containerName="mariadb-database-create" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.483103 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="a786107a-8b19-4bbd-9049-1d4590029976" containerName="mariadb-account-create-update" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.483114 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f2d2a25-1fa1-4d52-9ecb-5efe9871ea94" containerName="mariadb-database-create" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.483129 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="84147fa9-3a4b-467d-a9db-d8c4c3ff66e8" containerName="mariadb-account-create-update" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.483136 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="73c1e78c-8d3f-45d1-976e-284ae877919e" containerName="dnsmasq-dns" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.483142 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="56bc6cf2-a689-4bbe-a20e-beda8ebe0165" containerName="mariadb-account-create-update" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.483149 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f13f74e-d8ac-4cd4-a6f6-4797086d1f82" containerName="keystone-db-sync" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.483157 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="91aa9cb1-7cd0-4cd3-9918-460b5d976ab7" containerName="glance-db-sync" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.483165 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef4c1d97-0727-448e-910c-762a2f36ca80" containerName="mariadb-database-create" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.492386 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b868669f-4tc86" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.573536 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-l75gl"] Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.620033 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-l75gl" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.620365 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9ebd7773-8923-4ac4-827a-c5a97ab9b3b1-dns-svc\") pod \"dnsmasq-dns-5b868669f-4tc86\" (UID: \"9ebd7773-8923-4ac4-827a-c5a97ab9b3b1\") " pod="openstack/dnsmasq-dns-5b868669f-4tc86" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.620452 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4l62m\" (UniqueName: \"kubernetes.io/projected/9ebd7773-8923-4ac4-827a-c5a97ab9b3b1-kube-api-access-4l62m\") pod \"dnsmasq-dns-5b868669f-4tc86\" (UID: \"9ebd7773-8923-4ac4-827a-c5a97ab9b3b1\") " pod="openstack/dnsmasq-dns-5b868669f-4tc86" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.620483 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9ebd7773-8923-4ac4-827a-c5a97ab9b3b1-dns-swift-storage-0\") pod \"dnsmasq-dns-5b868669f-4tc86\" (UID: \"9ebd7773-8923-4ac4-827a-c5a97ab9b3b1\") " pod="openstack/dnsmasq-dns-5b868669f-4tc86" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.620558 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ebd7773-8923-4ac4-827a-c5a97ab9b3b1-config\") pod \"dnsmasq-dns-5b868669f-4tc86\" (UID: \"9ebd7773-8923-4ac4-827a-c5a97ab9b3b1\") " pod="openstack/dnsmasq-dns-5b868669f-4tc86" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.620583 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9ebd7773-8923-4ac4-827a-c5a97ab9b3b1-ovsdbserver-sb\") pod \"dnsmasq-dns-5b868669f-4tc86\" (UID: \"9ebd7773-8923-4ac4-827a-c5a97ab9b3b1\") " pod="openstack/dnsmasq-dns-5b868669f-4tc86" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.620629 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9ebd7773-8923-4ac4-827a-c5a97ab9b3b1-ovsdbserver-nb\") pod \"dnsmasq-dns-5b868669f-4tc86\" (UID: \"9ebd7773-8923-4ac4-827a-c5a97ab9b3b1\") " pod="openstack/dnsmasq-dns-5b868669f-4tc86" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.628609 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.629008 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.629167 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.629399 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.630466 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-scw59" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.651504 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b868669f-4tc86"] Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.651877 5048 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-l75gl"] Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.704944 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-8c8f677f9-25477"] Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.706384 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-8c8f677f9-25477" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.712625 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.712827 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.713383 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.722629 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-wgfsn" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.726820 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ebd7773-8923-4ac4-827a-c5a97ab9b3b1-config\") pod \"dnsmasq-dns-5b868669f-4tc86\" (UID: \"9ebd7773-8923-4ac4-827a-c5a97ab9b3b1\") " pod="openstack/dnsmasq-dns-5b868669f-4tc86" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.726862 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9ebd7773-8923-4ac4-827a-c5a97ab9b3b1-ovsdbserver-sb\") pod \"dnsmasq-dns-5b868669f-4tc86\" (UID: \"9ebd7773-8923-4ac4-827a-c5a97ab9b3b1\") " pod="openstack/dnsmasq-dns-5b868669f-4tc86" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.726897 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/69610210-b3a1-4e28-b3f9-c5146e878d72-credential-keys\") pod \"keystone-bootstrap-l75gl\" (UID: \"69610210-b3a1-4e28-b3f9-c5146e878d72\") " pod="openstack/keystone-bootstrap-l75gl" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.726925 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9ebd7773-8923-4ac4-827a-c5a97ab9b3b1-ovsdbserver-nb\") pod \"dnsmasq-dns-5b868669f-4tc86\" (UID: \"9ebd7773-8923-4ac4-827a-c5a97ab9b3b1\") " pod="openstack/dnsmasq-dns-5b868669f-4tc86" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.726955 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lz6lb\" (UniqueName: \"kubernetes.io/projected/69610210-b3a1-4e28-b3f9-c5146e878d72-kube-api-access-lz6lb\") pod \"keystone-bootstrap-l75gl\" (UID: \"69610210-b3a1-4e28-b3f9-c5146e878d72\") " pod="openstack/keystone-bootstrap-l75gl" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.726983 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9ebd7773-8923-4ac4-827a-c5a97ab9b3b1-dns-svc\") pod \"dnsmasq-dns-5b868669f-4tc86\" (UID: \"9ebd7773-8923-4ac4-827a-c5a97ab9b3b1\") " pod="openstack/dnsmasq-dns-5b868669f-4tc86" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.727009 5048 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69610210-b3a1-4e28-b3f9-c5146e878d72-combined-ca-bundle\") pod \"keystone-bootstrap-l75gl\" (UID: \"69610210-b3a1-4e28-b3f9-c5146e878d72\") " pod="openstack/keystone-bootstrap-l75gl" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.727036 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4l62m\" (UniqueName: \"kubernetes.io/projected/9ebd7773-8923-4ac4-827a-c5a97ab9b3b1-kube-api-access-4l62m\") pod \"dnsmasq-dns-5b868669f-4tc86\" (UID: \"9ebd7773-8923-4ac4-827a-c5a97ab9b3b1\") " pod="openstack/dnsmasq-dns-5b868669f-4tc86" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.727062 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69610210-b3a1-4e28-b3f9-c5146e878d72-config-data\") pod \"keystone-bootstrap-l75gl\" (UID: \"69610210-b3a1-4e28-b3f9-c5146e878d72\") " pod="openstack/keystone-bootstrap-l75gl" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.727081 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9ebd7773-8923-4ac4-827a-c5a97ab9b3b1-dns-swift-storage-0\") pod \"dnsmasq-dns-5b868669f-4tc86\" (UID: \"9ebd7773-8923-4ac4-827a-c5a97ab9b3b1\") " pod="openstack/dnsmasq-dns-5b868669f-4tc86" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.727109 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/69610210-b3a1-4e28-b3f9-c5146e878d72-fernet-keys\") pod \"keystone-bootstrap-l75gl\" (UID: \"69610210-b3a1-4e28-b3f9-c5146e878d72\") " pod="openstack/keystone-bootstrap-l75gl" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.727134 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/69610210-b3a1-4e28-b3f9-c5146e878d72-scripts\") pod \"keystone-bootstrap-l75gl\" (UID: \"69610210-b3a1-4e28-b3f9-c5146e878d72\") " pod="openstack/keystone-bootstrap-l75gl" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.727944 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ebd7773-8923-4ac4-827a-c5a97ab9b3b1-config\") pod \"dnsmasq-dns-5b868669f-4tc86\" (UID: \"9ebd7773-8923-4ac4-827a-c5a97ab9b3b1\") " pod="openstack/dnsmasq-dns-5b868669f-4tc86" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.728205 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9ebd7773-8923-4ac4-827a-c5a97ab9b3b1-dns-svc\") pod \"dnsmasq-dns-5b868669f-4tc86\" (UID: \"9ebd7773-8923-4ac4-827a-c5a97ab9b3b1\") " pod="openstack/dnsmasq-dns-5b868669f-4tc86" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.728625 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9ebd7773-8923-4ac4-827a-c5a97ab9b3b1-ovsdbserver-nb\") pod \"dnsmasq-dns-5b868669f-4tc86\" (UID: \"9ebd7773-8923-4ac4-827a-c5a97ab9b3b1\") " pod="openstack/dnsmasq-dns-5b868669f-4tc86" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.728777 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/9ebd7773-8923-4ac4-827a-c5a97ab9b3b1-dns-swift-storage-0\") pod \"dnsmasq-dns-5b868669f-4tc86\" (UID: \"9ebd7773-8923-4ac4-827a-c5a97ab9b3b1\") " pod="openstack/dnsmasq-dns-5b868669f-4tc86" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.728993 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9ebd7773-8923-4ac4-827a-c5a97ab9b3b1-ovsdbserver-sb\") pod \"dnsmasq-dns-5b868669f-4tc86\" (UID: \"9ebd7773-8923-4ac4-827a-c5a97ab9b3b1\") " pod="openstack/dnsmasq-dns-5b868669f-4tc86" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.732549 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-8c8f677f9-25477"] Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.751837 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-9xjhb"] Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.752867 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-9xjhb" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.771681 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-m5585" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.771858 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.771985 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.786230 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4l62m\" (UniqueName: \"kubernetes.io/projected/9ebd7773-8923-4ac4-827a-c5a97ab9b3b1-kube-api-access-4l62m\") pod \"dnsmasq-dns-5b868669f-4tc86\" (UID: \"9ebd7773-8923-4ac4-827a-c5a97ab9b3b1\") " pod="openstack/dnsmasq-dns-5b868669f-4tc86" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.829252 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/69610210-b3a1-4e28-b3f9-c5146e878d72-credential-keys\") pod \"keystone-bootstrap-l75gl\" (UID: \"69610210-b3a1-4e28-b3f9-c5146e878d72\") " pod="openstack/keystone-bootstrap-l75gl" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.829342 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9e51f251-b4ed-4920-b015-e9ac75c618b2-scripts\") pod \"horizon-8c8f677f9-25477\" (UID: \"9e51f251-b4ed-4920-b015-e9ac75c618b2\") " pod="openstack/horizon-8c8f677f9-25477" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.829403 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lz6lb\" (UniqueName: \"kubernetes.io/projected/69610210-b3a1-4e28-b3f9-c5146e878d72-kube-api-access-lz6lb\") pod \"keystone-bootstrap-l75gl\" (UID: \"69610210-b3a1-4e28-b3f9-c5146e878d72\") " pod="openstack/keystone-bootstrap-l75gl" Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.829449 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pxjxq\" (UniqueName: \"kubernetes.io/projected/9e51f251-b4ed-4920-b015-e9ac75c618b2-kube-api-access-pxjxq\") pod \"horizon-8c8f677f9-25477\" (UID: \"9e51f251-b4ed-4920-b015-e9ac75c618b2\") " pod="openstack/horizon-8c8f677f9-25477" Dec 13 
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.829502 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69610210-b3a1-4e28-b3f9-c5146e878d72-combined-ca-bundle\") pod \"keystone-bootstrap-l75gl\" (UID: \"69610210-b3a1-4e28-b3f9-c5146e878d72\") " pod="openstack/keystone-bootstrap-l75gl"
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.829553 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69610210-b3a1-4e28-b3f9-c5146e878d72-config-data\") pod \"keystone-bootstrap-l75gl\" (UID: \"69610210-b3a1-4e28-b3f9-c5146e878d72\") " pod="openstack/keystone-bootstrap-l75gl"
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.829577 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9e51f251-b4ed-4920-b015-e9ac75c618b2-config-data\") pod \"horizon-8c8f677f9-25477\" (UID: \"9e51f251-b4ed-4920-b015-e9ac75c618b2\") " pod="openstack/horizon-8c8f677f9-25477"
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.829625 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/69610210-b3a1-4e28-b3f9-c5146e878d72-fernet-keys\") pod \"keystone-bootstrap-l75gl\" (UID: \"69610210-b3a1-4e28-b3f9-c5146e878d72\") " pod="openstack/keystone-bootstrap-l75gl"
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.829673 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9e51f251-b4ed-4920-b015-e9ac75c618b2-logs\") pod \"horizon-8c8f677f9-25477\" (UID: \"9e51f251-b4ed-4920-b015-e9ac75c618b2\") " pod="openstack/horizon-8c8f677f9-25477"
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.829707 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/69610210-b3a1-4e28-b3f9-c5146e878d72-scripts\") pod \"keystone-bootstrap-l75gl\" (UID: \"69610210-b3a1-4e28-b3f9-c5146e878d72\") " pod="openstack/keystone-bootstrap-l75gl"
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.829734 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/9e51f251-b4ed-4920-b015-e9ac75c618b2-horizon-secret-key\") pod \"horizon-8c8f677f9-25477\" (UID: \"9e51f251-b4ed-4920-b015-e9ac75c618b2\") " pod="openstack/horizon-8c8f677f9-25477"
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.834924 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-9xjhb"]
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.840368 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/69610210-b3a1-4e28-b3f9-c5146e878d72-credential-keys\") pod \"keystone-bootstrap-l75gl\" (UID: \"69610210-b3a1-4e28-b3f9-c5146e878d72\") " pod="openstack/keystone-bootstrap-l75gl"
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.841315 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/69610210-b3a1-4e28-b3f9-c5146e878d72-fernet-keys\") pod \"keystone-bootstrap-l75gl\" (UID: \"69610210-b3a1-4e28-b3f9-c5146e878d72\") " pod="openstack/keystone-bootstrap-l75gl"
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.841978 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69610210-b3a1-4e28-b3f9-c5146e878d72-combined-ca-bundle\") pod \"keystone-bootstrap-l75gl\" (UID: \"69610210-b3a1-4e28-b3f9-c5146e878d72\") " pod="openstack/keystone-bootstrap-l75gl"
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.846850 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/69610210-b3a1-4e28-b3f9-c5146e878d72-scripts\") pod \"keystone-bootstrap-l75gl\" (UID: \"69610210-b3a1-4e28-b3f9-c5146e878d72\") " pod="openstack/keystone-bootstrap-l75gl"
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.851290 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-7ksgw"]
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.852581 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-7ksgw"
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.853157 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69610210-b3a1-4e28-b3f9-c5146e878d72-config-data\") pod \"keystone-bootstrap-l75gl\" (UID: \"69610210-b3a1-4e28-b3f9-c5146e878d72\") " pod="openstack/keystone-bootstrap-l75gl"
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.856902 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b868669f-4tc86"
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.865934 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-7ksgw"]
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.896230 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-hxzvm"
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.896287 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config"
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.896492 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config"
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.921737 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b868669f-4tc86"]
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.935464 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8a4a2da1-cc7f-474f-baf7-16c352bd0708-etc-machine-id\") pod \"cinder-db-sync-9xjhb\" (UID: \"8a4a2da1-cc7f-474f-baf7-16c352bd0708\") " pod="openstack/cinder-db-sync-9xjhb"
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.935543 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9e51f251-b4ed-4920-b015-e9ac75c618b2-scripts\") pod \"horizon-8c8f677f9-25477\" (UID: \"9e51f251-b4ed-4920-b015-e9ac75c618b2\") " pod="openstack/horizon-8c8f677f9-25477"
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.935587 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n59rd\" (UniqueName: \"kubernetes.io/projected/8a4a2da1-cc7f-474f-baf7-16c352bd0708-kube-api-access-n59rd\") pod \"cinder-db-sync-9xjhb\" (UID: \"8a4a2da1-cc7f-474f-baf7-16c352bd0708\") " pod="openstack/cinder-db-sync-9xjhb"
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.935646 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pxjxq\" (UniqueName: \"kubernetes.io/projected/9e51f251-b4ed-4920-b015-e9ac75c618b2-kube-api-access-pxjxq\") pod \"horizon-8c8f677f9-25477\" (UID: \"9e51f251-b4ed-4920-b015-e9ac75c618b2\") " pod="openstack/horizon-8c8f677f9-25477"
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.935679 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9e51f251-b4ed-4920-b015-e9ac75c618b2-config-data\") pod \"horizon-8c8f677f9-25477\" (UID: \"9e51f251-b4ed-4920-b015-e9ac75c618b2\") " pod="openstack/horizon-8c8f677f9-25477"
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.935714 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/8a4a2da1-cc7f-474f-baf7-16c352bd0708-db-sync-config-data\") pod \"cinder-db-sync-9xjhb\" (UID: \"8a4a2da1-cc7f-474f-baf7-16c352bd0708\") " pod="openstack/cinder-db-sync-9xjhb"
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.935748 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a4a2da1-cc7f-474f-baf7-16c352bd0708-combined-ca-bundle\") pod \"cinder-db-sync-9xjhb\" (UID: \"8a4a2da1-cc7f-474f-baf7-16c352bd0708\") " pod="openstack/cinder-db-sync-9xjhb"
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.935779 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9e51f251-b4ed-4920-b015-e9ac75c618b2-logs\") pod \"horizon-8c8f677f9-25477\" (UID: \"9e51f251-b4ed-4920-b015-e9ac75c618b2\") " pod="openstack/horizon-8c8f677f9-25477"
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.935825 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a4a2da1-cc7f-474f-baf7-16c352bd0708-scripts\") pod \"cinder-db-sync-9xjhb\" (UID: \"8a4a2da1-cc7f-474f-baf7-16c352bd0708\") " pod="openstack/cinder-db-sync-9xjhb"
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.935851 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/9e51f251-b4ed-4920-b015-e9ac75c618b2-horizon-secret-key\") pod \"horizon-8c8f677f9-25477\" (UID: \"9e51f251-b4ed-4920-b015-e9ac75c618b2\") " pod="openstack/horizon-8c8f677f9-25477"
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.935894 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a4a2da1-cc7f-474f-baf7-16c352bd0708-config-data\") pod \"cinder-db-sync-9xjhb\" (UID: \"8a4a2da1-cc7f-474f-baf7-16c352bd0708\") " pod="openstack/cinder-db-sync-9xjhb"
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.936038 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lz6lb\" (UniqueName: \"kubernetes.io/projected/69610210-b3a1-4e28-b3f9-c5146e878d72-kube-api-access-lz6lb\") pod \"keystone-bootstrap-l75gl\" (UID: \"69610210-b3a1-4e28-b3f9-c5146e878d72\") " pod="openstack/keystone-bootstrap-l75gl"
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.936767 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9e51f251-b4ed-4920-b015-e9ac75c618b2-scripts\") pod \"horizon-8c8f677f9-25477\" (UID: \"9e51f251-b4ed-4920-b015-e9ac75c618b2\") " pod="openstack/horizon-8c8f677f9-25477"
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.937057 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9e51f251-b4ed-4920-b015-e9ac75c618b2-logs\") pod \"horizon-8c8f677f9-25477\" (UID: \"9e51f251-b4ed-4920-b015-e9ac75c618b2\") " pod="openstack/horizon-8c8f677f9-25477"
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.937270 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9e51f251-b4ed-4920-b015-e9ac75c618b2-config-data\") pod \"horizon-8c8f677f9-25477\" (UID: \"9e51f251-b4ed-4920-b015-e9ac75c618b2\") " pod="openstack/horizon-8c8f677f9-25477"
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.955195 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/9e51f251-b4ed-4920-b015-e9ac75c618b2-horizon-secret-key\") pod \"horizon-8c8f677f9-25477\" (UID: \"9e51f251-b4ed-4920-b015-e9ac75c618b2\") " pod="openstack/horizon-8c8f677f9-25477"
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.971064 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-l75gl"
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.982533 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-75nl7"]
Dec 13 06:49:14 crc kubenswrapper[5048]: I1213 06:49:14.983830 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-75nl7"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.007548 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.007794 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.007929 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-gjs7x"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.020757 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.022054 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pxjxq\" (UniqueName: \"kubernetes.io/projected/9e51f251-b4ed-4920-b015-e9ac75c618b2-kube-api-access-pxjxq\") pod \"horizon-8c8f677f9-25477\" (UID: \"9e51f251-b4ed-4920-b015-e9ac75c618b2\") " pod="openstack/horizon-8c8f677f9-25477"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.023148 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.031823 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.039456 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a4a2da1-cc7f-474f-baf7-16c352bd0708-combined-ca-bundle\") pod \"cinder-db-sync-9xjhb\" (UID: \"8a4a2da1-cc7f-474f-baf7-16c352bd0708\") " pod="openstack/cinder-db-sync-9xjhb"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.039515 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a4a2da1-cc7f-474f-baf7-16c352bd0708-scripts\") pod \"cinder-db-sync-9xjhb\" (UID: \"8a4a2da1-cc7f-474f-baf7-16c352bd0708\") " pod="openstack/cinder-db-sync-9xjhb"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.039575 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a4a2da1-cc7f-474f-baf7-16c352bd0708-config-data\") pod \"cinder-db-sync-9xjhb\" (UID: \"8a4a2da1-cc7f-474f-baf7-16c352bd0708\") " pod="openstack/cinder-db-sync-9xjhb"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.039614 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/34ff9369-df92-416e-a391-18b362cd491f-config\") pod \"neutron-db-sync-7ksgw\" (UID: \"34ff9369-df92-416e-a391-18b362cd491f\") " pod="openstack/neutron-db-sync-7ksgw"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.039641 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8a4a2da1-cc7f-474f-baf7-16c352bd0708-etc-machine-id\") pod \"cinder-db-sync-9xjhb\" (UID: \"8a4a2da1-cc7f-474f-baf7-16c352bd0708\") " pod="openstack/cinder-db-sync-9xjhb"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.039673 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34ff9369-df92-416e-a391-18b362cd491f-combined-ca-bundle\") pod \"neutron-db-sync-7ksgw\" (UID: \"34ff9369-df92-416e-a391-18b362cd491f\") " pod="openstack/neutron-db-sync-7ksgw"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.039718 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r8lwt\" (UniqueName: \"kubernetes.io/projected/34ff9369-df92-416e-a391-18b362cd491f-kube-api-access-r8lwt\") pod \"neutron-db-sync-7ksgw\" (UID: \"34ff9369-df92-416e-a391-18b362cd491f\") " pod="openstack/neutron-db-sync-7ksgw"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.039743 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n59rd\" (UniqueName: \"kubernetes.io/projected/8a4a2da1-cc7f-474f-baf7-16c352bd0708-kube-api-access-n59rd\") pod \"cinder-db-sync-9xjhb\" (UID: \"8a4a2da1-cc7f-474f-baf7-16c352bd0708\") " pod="openstack/cinder-db-sync-9xjhb"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.039806 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/8a4a2da1-cc7f-474f-baf7-16c352bd0708-db-sync-config-data\") pod \"cinder-db-sync-9xjhb\" (UID: \"8a4a2da1-cc7f-474f-baf7-16c352bd0708\") " pod="openstack/cinder-db-sync-9xjhb"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.044066 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8a4a2da1-cc7f-474f-baf7-16c352bd0708-etc-machine-id\") pod \"cinder-db-sync-9xjhb\" (UID: \"8a4a2da1-cc7f-474f-baf7-16c352bd0708\") " pod="openstack/cinder-db-sync-9xjhb"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.053121 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a4a2da1-cc7f-474f-baf7-16c352bd0708-scripts\") pod \"cinder-db-sync-9xjhb\" (UID: \"8a4a2da1-cc7f-474f-baf7-16c352bd0708\") " pod="openstack/cinder-db-sync-9xjhb"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.053201 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-75nl7"]
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.056623 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-8c8f677f9-25477"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.058262 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a4a2da1-cc7f-474f-baf7-16c352bd0708-combined-ca-bundle\") pod \"cinder-db-sync-9xjhb\" (UID: \"8a4a2da1-cc7f-474f-baf7-16c352bd0708\") " pod="openstack/cinder-db-sync-9xjhb"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.061247 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/8a4a2da1-cc7f-474f-baf7-16c352bd0708-db-sync-config-data\") pod \"cinder-db-sync-9xjhb\" (UID: \"8a4a2da1-cc7f-474f-baf7-16c352bd0708\") " pod="openstack/cinder-db-sync-9xjhb"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.063610 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a4a2da1-cc7f-474f-baf7-16c352bd0708-config-data\") pod \"cinder-db-sync-9xjhb\" (UID: \"8a4a2da1-cc7f-474f-baf7-16c352bd0708\") " pod="openstack/cinder-db-sync-9xjhb"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.064496 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.084165 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n59rd\" (UniqueName: \"kubernetes.io/projected/8a4a2da1-cc7f-474f-baf7-16c352bd0708-kube-api-access-n59rd\") pod \"cinder-db-sync-9xjhb\" (UID: \"8a4a2da1-cc7f-474f-baf7-16c352bd0708\") " pod="openstack/cinder-db-sync-9xjhb"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.085094 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.085218 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-9xjhb"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.096549 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-qzjng"]
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.098066 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf78879c9-qzjng"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.104560 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-qzjng"]
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.121750 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.123022 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.133030 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-9stwb"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.133218 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.133318 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.141246 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b\") " pod="openstack/ceilometer-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.141307 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4qsj7\" (UniqueName: \"kubernetes.io/projected/e351b290-4e5b-496d-94be-545f01ae8e15-kube-api-access-4qsj7\") pod \"placement-db-sync-75nl7\" (UID: \"e351b290-4e5b-496d-94be-545f01ae8e15\") " pod="openstack/placement-db-sync-75nl7"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.141371 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/34ff9369-df92-416e-a391-18b362cd491f-config\") pod \"neutron-db-sync-7ksgw\" (UID: \"34ff9369-df92-416e-a391-18b362cd491f\") " pod="openstack/neutron-db-sync-7ksgw"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.141394 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nfpxd\" (UniqueName: \"kubernetes.io/projected/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-kube-api-access-nfpxd\") pod \"ceilometer-0\" (UID: \"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b\") " pod="openstack/ceilometer-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.141411 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-scripts\") pod \"ceilometer-0\" (UID: \"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b\") " pod="openstack/ceilometer-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.141426 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-config-data\") pod \"ceilometer-0\" (UID: \"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b\") " pod="openstack/ceilometer-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.141456 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e351b290-4e5b-496d-94be-545f01ae8e15-scripts\") pod \"placement-db-sync-75nl7\" (UID: \"e351b290-4e5b-496d-94be-545f01ae8e15\") " pod="openstack/placement-db-sync-75nl7"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.141498 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e351b290-4e5b-496d-94be-545f01ae8e15-logs\") pod \"placement-db-sync-75nl7\" (UID: \"e351b290-4e5b-496d-94be-545f01ae8e15\") " pod="openstack/placement-db-sync-75nl7"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.141517 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34ff9369-df92-416e-a391-18b362cd491f-combined-ca-bundle\") pod \"neutron-db-sync-7ksgw\" (UID: \"34ff9369-df92-416e-a391-18b362cd491f\") " pod="openstack/neutron-db-sync-7ksgw"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.141536 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e351b290-4e5b-496d-94be-545f01ae8e15-combined-ca-bundle\") pod \"placement-db-sync-75nl7\" (UID: \"e351b290-4e5b-496d-94be-545f01ae8e15\") " pod="openstack/placement-db-sync-75nl7"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.141557 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-log-httpd\") pod \"ceilometer-0\" (UID: \"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b\") " pod="openstack/ceilometer-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.141579 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r8lwt\" (UniqueName: \"kubernetes.io/projected/34ff9369-df92-416e-a391-18b362cd491f-kube-api-access-r8lwt\") pod \"neutron-db-sync-7ksgw\" (UID: \"34ff9369-df92-416e-a391-18b362cd491f\") " pod="openstack/neutron-db-sync-7ksgw"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.141602 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b\") " pod="openstack/ceilometer-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.141652 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-run-httpd\") pod \"ceilometer-0\" (UID: \"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b\") " pod="openstack/ceilometer-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.141682 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e351b290-4e5b-496d-94be-545f01ae8e15-config-data\") pod \"placement-db-sync-75nl7\" (UID: \"e351b290-4e5b-496d-94be-545f01ae8e15\") " pod="openstack/placement-db-sync-75nl7"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.144786 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34ff9369-df92-416e-a391-18b362cd491f-combined-ca-bundle\") pod \"neutron-db-sync-7ksgw\" (UID: \"34ff9369-df92-416e-a391-18b362cd491f\") " pod="openstack/neutron-db-sync-7ksgw"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.164083 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/34ff9369-df92-416e-a391-18b362cd491f-config\") pod \"neutron-db-sync-7ksgw\" (UID: \"34ff9369-df92-416e-a391-18b362cd491f\") " pod="openstack/neutron-db-sync-7ksgw"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.213396 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-769d668ff7-qx7l7"]
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.214793 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-769d668ff7-qx7l7"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.219218 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r8lwt\" (UniqueName: \"kubernetes.io/projected/34ff9369-df92-416e-a391-18b362cd491f-kube-api-access-r8lwt\") pod \"neutron-db-sync-7ksgw\" (UID: \"34ff9369-df92-416e-a391-18b362cd491f\") " pod="openstack/neutron-db-sync-7ksgw"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.234538 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.243561 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p6kr6\" (UniqueName: \"kubernetes.io/projected/33ee9f9c-b391-48fe-80f1-7d078b8e7b5b-kube-api-access-p6kr6\") pod \"glance-default-external-api-0\" (UID: \"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b\") " pod="openstack/glance-default-external-api-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.243603 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/33ee9f9c-b391-48fe-80f1-7d078b8e7b5b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b\") " pod="openstack/glance-default-external-api-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.243631 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-run-httpd\") pod \"ceilometer-0\" (UID: \"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b\") " pod="openstack/ceilometer-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.247000 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e351b290-4e5b-496d-94be-545f01ae8e15-config-data\") pod \"placement-db-sync-75nl7\" (UID: \"e351b290-4e5b-496d-94be-545f01ae8e15\") " pod="openstack/placement-db-sync-75nl7"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.247057 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b\") " pod="openstack/glance-default-external-api-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.247080 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33ee9f9c-b391-48fe-80f1-7d078b8e7b5b-config-data\") pod \"glance-default-external-api-0\" (UID: \"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b\") " pod="openstack/glance-default-external-api-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.247114 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b\") " pod="openstack/ceilometer-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.247135 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/33ee9f9c-b391-48fe-80f1-7d078b8e7b5b-logs\") pod \"glance-default-external-api-0\" (UID: \"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b\") " pod="openstack/glance-default-external-api-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.247163 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/394c78a7-046c-4cdd-89a0-42d812126386-ovsdbserver-sb\") pod \"dnsmasq-dns-cf78879c9-qzjng\" (UID: \"394c78a7-046c-4cdd-89a0-42d812126386\") " pod="openstack/dnsmasq-dns-cf78879c9-qzjng"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.247613 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4qsj7\" (UniqueName: \"kubernetes.io/projected/e351b290-4e5b-496d-94be-545f01ae8e15-kube-api-access-4qsj7\") pod \"placement-db-sync-75nl7\" (UID: \"e351b290-4e5b-496d-94be-545f01ae8e15\") " pod="openstack/placement-db-sync-75nl7"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.247768 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/394c78a7-046c-4cdd-89a0-42d812126386-dns-svc\") pod \"dnsmasq-dns-cf78879c9-qzjng\" (UID: \"394c78a7-046c-4cdd-89a0-42d812126386\") " pod="openstack/dnsmasq-dns-cf78879c9-qzjng"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.247811 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9c4gk\" (UniqueName: \"kubernetes.io/projected/394c78a7-046c-4cdd-89a0-42d812126386-kube-api-access-9c4gk\") pod \"dnsmasq-dns-cf78879c9-qzjng\" (UID: \"394c78a7-046c-4cdd-89a0-42d812126386\") " pod="openstack/dnsmasq-dns-cf78879c9-qzjng"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.247888 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nfpxd\" (UniqueName: \"kubernetes.io/projected/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-kube-api-access-nfpxd\") pod \"ceilometer-0\" (UID: \"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b\") " pod="openstack/ceilometer-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.247920 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-scripts\") pod \"ceilometer-0\" (UID: \"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b\") " pod="openstack/ceilometer-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.248557 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-run-httpd\") pod \"ceilometer-0\" (UID: \"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b\") " pod="openstack/ceilometer-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.248947 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-config-data\") pod \"ceilometer-0\" (UID: \"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b\") " pod="openstack/ceilometer-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.248989 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33ee9f9c-b391-48fe-80f1-7d078b8e7b5b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b\") " pod="openstack/glance-default-external-api-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.249013 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e351b290-4e5b-496d-94be-545f01ae8e15-scripts\") pod \"placement-db-sync-75nl7\" (UID: \"e351b290-4e5b-496d-94be-545f01ae8e15\") " pod="openstack/placement-db-sync-75nl7"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.249042 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e351b290-4e5b-496d-94be-545f01ae8e15-logs\") pod \"placement-db-sync-75nl7\" (UID: \"e351b290-4e5b-496d-94be-545f01ae8e15\") " pod="openstack/placement-db-sync-75nl7"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.249107 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e351b290-4e5b-496d-94be-545f01ae8e15-combined-ca-bundle\") pod \"placement-db-sync-75nl7\" (UID: \"e351b290-4e5b-496d-94be-545f01ae8e15\") " pod="openstack/placement-db-sync-75nl7"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.249146 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-log-httpd\") pod \"ceilometer-0\" (UID: \"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b\") " pod="openstack/ceilometer-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.249203 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/394c78a7-046c-4cdd-89a0-42d812126386-dns-swift-storage-0\") pod \"dnsmasq-dns-cf78879c9-qzjng\" (UID: \"394c78a7-046c-4cdd-89a0-42d812126386\") " pod="openstack/dnsmasq-dns-cf78879c9-qzjng"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.249279 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b\") " pod="openstack/ceilometer-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.249350 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/394c78a7-046c-4cdd-89a0-42d812126386-ovsdbserver-nb\") pod \"dnsmasq-dns-cf78879c9-qzjng\" (UID: \"394c78a7-046c-4cdd-89a0-42d812126386\") " pod="openstack/dnsmasq-dns-cf78879c9-qzjng"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.249465 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33ee9f9c-b391-48fe-80f1-7d078b8e7b5b-scripts\") pod \"glance-default-external-api-0\" (UID: \"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b\") " pod="openstack/glance-default-external-api-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.249501 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/394c78a7-046c-4cdd-89a0-42d812126386-config\") pod \"dnsmasq-dns-cf78879c9-qzjng\" (UID: \"394c78a7-046c-4cdd-89a0-42d812126386\") " pod="openstack/dnsmasq-dns-cf78879c9-qzjng"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.252531 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b\") " pod="openstack/ceilometer-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.262571 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e351b290-4e5b-496d-94be-545f01ae8e15-config-data\") pod \"placement-db-sync-75nl7\" (UID: \"e351b290-4e5b-496d-94be-545f01ae8e15\") " pod="openstack/placement-db-sync-75nl7"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.263829 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-log-httpd\") pod \"ceilometer-0\" (UID: \"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b\") " pod="openstack/ceilometer-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.271854 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e351b290-4e5b-496d-94be-545f01ae8e15-logs\") pod \"placement-db-sync-75nl7\" (UID: \"e351b290-4e5b-496d-94be-545f01ae8e15\") " pod="openstack/placement-db-sync-75nl7"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.293206 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-config-data\") pod \"ceilometer-0\" (UID: \"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b\") " pod="openstack/ceilometer-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.293887 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b\") " pod="openstack/ceilometer-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.306072 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-scripts\") pod \"ceilometer-0\" (UID: \"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b\") " pod="openstack/ceilometer-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.309048 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-pzgrc"]
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.310634 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-pzgrc"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.316364 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-6lzvh"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.316702 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.317613 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nfpxd\" (UniqueName: \"kubernetes.io/projected/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-kube-api-access-nfpxd\") pod \"ceilometer-0\" (UID: \"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b\") " pod="openstack/ceilometer-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.348663 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4qsj7\" (UniqueName: \"kubernetes.io/projected/e351b290-4e5b-496d-94be-545f01ae8e15-kube-api-access-4qsj7\") pod \"placement-db-sync-75nl7\" (UID: \"e351b290-4e5b-496d-94be-545f01ae8e15\") " pod="openstack/placement-db-sync-75nl7"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.349203 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e351b290-4e5b-496d-94be-545f01ae8e15-scripts\") pod \"placement-db-sync-75nl7\" (UID: \"e351b290-4e5b-496d-94be-545f01ae8e15\") " pod="openstack/placement-db-sync-75nl7"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.350170 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e351b290-4e5b-496d-94be-545f01ae8e15-combined-ca-bundle\") pod \"placement-db-sync-75nl7\" (UID: \"e351b290-4e5b-496d-94be-545f01ae8e15\") " pod="openstack/placement-db-sync-75nl7"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.351043 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-7ksgw"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.366308 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2476d7cd-fb97-45d1-a67d-8fb867d8f296-logs\") pod \"horizon-769d668ff7-qx7l7\" (UID: \"2476d7cd-fb97-45d1-a67d-8fb867d8f296\") " pod="openstack/horizon-769d668ff7-qx7l7"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.366516 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/394c78a7-046c-4cdd-89a0-42d812126386-ovsdbserver-nb\") pod \"dnsmasq-dns-cf78879c9-qzjng\" (UID: \"394c78a7-046c-4cdd-89a0-42d812126386\") " pod="openstack/dnsmasq-dns-cf78879c9-qzjng"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.393032 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33ee9f9c-b391-48fe-80f1-7d078b8e7b5b-scripts\") pod \"glance-default-external-api-0\" (UID: \"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b\") " pod="openstack/glance-default-external-api-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.393087 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/394c78a7-046c-4cdd-89a0-42d812126386-config\") pod \"dnsmasq-dns-cf78879c9-qzjng\" (UID: \"394c78a7-046c-4cdd-89a0-42d812126386\") " pod="openstack/dnsmasq-dns-cf78879c9-qzjng"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.393150 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p6kr6\" (UniqueName: \"kubernetes.io/projected/33ee9f9c-b391-48fe-80f1-7d078b8e7b5b-kube-api-access-p6kr6\") pod \"glance-default-external-api-0\" (UID: \"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b\") " pod="openstack/glance-default-external-api-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.393181 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/33ee9f9c-b391-48fe-80f1-7d078b8e7b5b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b\") " pod="openstack/glance-default-external-api-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.393247 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fnwdm\" (UniqueName: \"kubernetes.io/projected/2476d7cd-fb97-45d1-a67d-8fb867d8f296-kube-api-access-fnwdm\") pod \"horizon-769d668ff7-qx7l7\" (UID: \"2476d7cd-fb97-45d1-a67d-8fb867d8f296\") " pod="openstack/horizon-769d668ff7-qx7l7"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.393285 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2476d7cd-fb97-45d1-a67d-8fb867d8f296-config-data\") pod \"horizon-769d668ff7-qx7l7\" (UID: \"2476d7cd-fb97-45d1-a67d-8fb867d8f296\") " pod="openstack/horizon-769d668ff7-qx7l7"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.393327 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-677wd\" (UniqueName: \"kubernetes.io/projected/af6830b6-96f2-487f-ba02-93c7f01d0ceb-kube-api-access-677wd\") pod \"barbican-db-sync-pzgrc\" (UID: \"af6830b6-96f2-487f-ba02-93c7f01d0ceb\") " pod="openstack/barbican-db-sync-pzgrc"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.393358 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b\") " pod="openstack/glance-default-external-api-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.393376 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33ee9f9c-b391-48fe-80f1-7d078b8e7b5b-config-data\") pod \"glance-default-external-api-0\" (UID: \"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b\") " pod="openstack/glance-default-external-api-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.393410 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2476d7cd-fb97-45d1-a67d-8fb867d8f296-scripts\") pod \"horizon-769d668ff7-qx7l7\" (UID: \"2476d7cd-fb97-45d1-a67d-8fb867d8f296\") " pod="openstack/horizon-769d668ff7-qx7l7"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.393430 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/33ee9f9c-b391-48fe-80f1-7d078b8e7b5b-logs\") pod \"glance-default-external-api-0\" (UID: \"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b\") " pod="openstack/glance-default-external-api-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.393478 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/394c78a7-046c-4cdd-89a0-42d812126386-ovsdbserver-sb\") pod \"dnsmasq-dns-cf78879c9-qzjng\" (UID: \"394c78a7-046c-4cdd-89a0-42d812126386\") " pod="openstack/dnsmasq-dns-cf78879c9-qzjng"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.393508 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/af6830b6-96f2-487f-ba02-93c7f01d0ceb-db-sync-config-data\") pod \"barbican-db-sync-pzgrc\" (UID: \"af6830b6-96f2-487f-ba02-93c7f01d0ceb\") " pod="openstack/barbican-db-sync-pzgrc"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.393553 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/394c78a7-046c-4cdd-89a0-42d812126386-dns-svc\") pod \"dnsmasq-dns-cf78879c9-qzjng\" (UID: \"394c78a7-046c-4cdd-89a0-42d812126386\") " pod="openstack/dnsmasq-dns-cf78879c9-qzjng"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.393585 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9c4gk\" (UniqueName: \"kubernetes.io/projected/394c78a7-046c-4cdd-89a0-42d812126386-kube-api-access-9c4gk\") pod \"dnsmasq-dns-cf78879c9-qzjng\" (UID: \"394c78a7-046c-4cdd-89a0-42d812126386\") " pod="openstack/dnsmasq-dns-cf78879c9-qzjng"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.393648 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33ee9f9c-b391-48fe-80f1-7d078b8e7b5b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b\") " pod="openstack/glance-default-external-api-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.393672 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/2476d7cd-fb97-45d1-a67d-8fb867d8f296-horizon-secret-key\") pod \"horizon-769d668ff7-qx7l7\" (UID: \"2476d7cd-fb97-45d1-a67d-8fb867d8f296\") " pod="openstack/horizon-769d668ff7-qx7l7"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.393730 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/394c78a7-046c-4cdd-89a0-42d812126386-dns-swift-storage-0\") pod \"dnsmasq-dns-cf78879c9-qzjng\" (UID: \"394c78a7-046c-4cdd-89a0-42d812126386\") " pod="openstack/dnsmasq-dns-cf78879c9-qzjng"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.393757 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af6830b6-96f2-487f-ba02-93c7f01d0ceb-combined-ca-bundle\") pod \"barbican-db-sync-pzgrc\" (UID: \"af6830b6-96f2-487f-ba02-93c7f01d0ceb\") " pod="openstack/barbican-db-sync-pzgrc"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.370871 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-75nl7"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.415140 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-769d668ff7-qx7l7"]
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.433331 5048 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-external-api-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.436870 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/33ee9f9c-b391-48fe-80f1-7d078b8e7b5b-logs\") pod \"glance-default-external-api-0\" (UID: \"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b\") " pod="openstack/glance-default-external-api-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.442212 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/33ee9f9c-b391-48fe-80f1-7d078b8e7b5b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b\") " pod="openstack/glance-default-external-api-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.448249 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-qzjng"]
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.451962 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33ee9f9c-b391-48fe-80f1-7d078b8e7b5b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b\") " pod="openstack/glance-default-external-api-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.454985 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/394c78a7-046c-4cdd-89a0-42d812126386-dns-swift-storage-0\") pod \"dnsmasq-dns-cf78879c9-qzjng\" (UID: \"394c78a7-046c-4cdd-89a0-42d812126386\") " pod="openstack/dnsmasq-dns-cf78879c9-qzjng"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.455092 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/394c78a7-046c-4cdd-89a0-42d812126386-ovsdbserver-sb\") pod \"dnsmasq-dns-cf78879c9-qzjng\" (UID: \"394c78a7-046c-4cdd-89a0-42d812126386\") " pod="openstack/dnsmasq-dns-cf78879c9-qzjng"
Dec 13 06:49:15 crc kubenswrapper[5048]: E1213 06:49:15.455293 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[config dns-svc dns-swift-storage-0 kube-api-access-9c4gk ovsdbserver-nb ovsdbserver-sb], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/dnsmasq-dns-cf78879c9-qzjng" podUID="394c78a7-046c-4cdd-89a0-42d812126386"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.455348 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/394c78a7-046c-4cdd-89a0-42d812126386-ovsdbserver-nb\") pod \"dnsmasq-dns-cf78879c9-qzjng\" (UID: \"394c78a7-046c-4cdd-89a0-42d812126386\") " pod="openstack/dnsmasq-dns-cf78879c9-qzjng"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.455599 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33ee9f9c-b391-48fe-80f1-7d078b8e7b5b-config-data\") pod \"glance-default-external-api-0\" (UID: \"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b\") " pod="openstack/glance-default-external-api-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.459860 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.462452 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/394c78a7-046c-4cdd-89a0-42d812126386-dns-svc\") pod \"dnsmasq-dns-cf78879c9-qzjng\" (UID: \"394c78a7-046c-4cdd-89a0-42d812126386\") " pod="openstack/dnsmasq-dns-cf78879c9-qzjng"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.463059 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/394c78a7-046c-4cdd-89a0-42d812126386-config\") pod \"dnsmasq-dns-cf78879c9-qzjng\" (UID: \"394c78a7-046c-4cdd-89a0-42d812126386\") " pod="openstack/dnsmasq-dns-cf78879c9-qzjng"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.464260 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33ee9f9c-b391-48fe-80f1-7d078b8e7b5b-scripts\") pod \"glance-default-external-api-0\" (UID: \"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b\") " pod="openstack/glance-default-external-api-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.489585 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p6kr6\" (UniqueName: \"kubernetes.io/projected/33ee9f9c-b391-48fe-80f1-7d078b8e7b5b-kube-api-access-p6kr6\") pod \"glance-default-external-api-0\" (UID: \"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b\") " pod="openstack/glance-default-external-api-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.492735 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9c4gk\" (UniqueName: \"kubernetes.io/projected/394c78a7-046c-4cdd-89a0-42d812126386-kube-api-access-9c4gk\") pod \"dnsmasq-dns-cf78879c9-qzjng\" (UID: \"394c78a7-046c-4cdd-89a0-42d812126386\") " pod="openstack/dnsmasq-dns-cf78879c9-qzjng"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.492763 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-pzgrc"]
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.496561 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-677wd\" (UniqueName: \"kubernetes.io/projected/af6830b6-96f2-487f-ba02-93c7f01d0ceb-kube-api-access-677wd\") pod \"barbican-db-sync-pzgrc\" (UID: \"af6830b6-96f2-487f-ba02-93c7f01d0ceb\") " pod="openstack/barbican-db-sync-pzgrc"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.496934 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2476d7cd-fb97-45d1-a67d-8fb867d8f296-scripts\") pod \"horizon-769d668ff7-qx7l7\" (UID: \"2476d7cd-fb97-45d1-a67d-8fb867d8f296\") " pod="openstack/horizon-769d668ff7-qx7l7"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.497044 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/af6830b6-96f2-487f-ba02-93c7f01d0ceb-db-sync-config-data\") pod \"barbican-db-sync-pzgrc\" (UID: \"af6830b6-96f2-487f-ba02-93c7f01d0ceb\") " pod="openstack/barbican-db-sync-pzgrc"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.497194 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/2476d7cd-fb97-45d1-a67d-8fb867d8f296-horizon-secret-key\") pod \"horizon-769d668ff7-qx7l7\" (UID: \"2476d7cd-fb97-45d1-a67d-8fb867d8f296\") " pod="openstack/horizon-769d668ff7-qx7l7"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.497331 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af6830b6-96f2-487f-ba02-93c7f01d0ceb-combined-ca-bundle\") pod \"barbican-db-sync-pzgrc\" (UID: \"af6830b6-96f2-487f-ba02-93c7f01d0ceb\") " pod="openstack/barbican-db-sync-pzgrc"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.497483 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2476d7cd-fb97-45d1-a67d-8fb867d8f296-logs\") pod \"horizon-769d668ff7-qx7l7\" (UID: \"2476d7cd-fb97-45d1-a67d-8fb867d8f296\") " pod="openstack/horizon-769d668ff7-qx7l7"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.497603 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fnwdm\" (UniqueName: \"kubernetes.io/projected/2476d7cd-fb97-45d1-a67d-8fb867d8f296-kube-api-access-fnwdm\") pod \"horizon-769d668ff7-qx7l7\" (UID: \"2476d7cd-fb97-45d1-a67d-8fb867d8f296\") " pod="openstack/horizon-769d668ff7-qx7l7"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.497724 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2476d7cd-fb97-45d1-a67d-8fb867d8f296-config-data\") pod \"horizon-769d668ff7-qx7l7\" (UID: \"2476d7cd-fb97-45d1-a67d-8fb867d8f296\") " pod="openstack/horizon-769d668ff7-qx7l7"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.516560 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2476d7cd-fb97-45d1-a67d-8fb867d8f296-scripts\") pod \"horizon-769d668ff7-qx7l7\" (UID: \"2476d7cd-fb97-45d1-a67d-8fb867d8f296\") " pod="openstack/horizon-769d668ff7-qx7l7"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.518566 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af6830b6-96f2-487f-ba02-93c7f01d0ceb-combined-ca-bundle\") pod \"barbican-db-sync-pzgrc\" (UID: \"af6830b6-96f2-487f-ba02-93c7f01d0ceb\") " pod="openstack/barbican-db-sync-pzgrc"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.519799 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2476d7cd-fb97-45d1-a67d-8fb867d8f296-config-data\") pod \"horizon-769d668ff7-qx7l7\" (UID: \"2476d7cd-fb97-45d1-a67d-8fb867d8f296\") " pod="openstack/horizon-769d668ff7-qx7l7"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.523689 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/2476d7cd-fb97-45d1-a67d-8fb867d8f296-horizon-secret-key\") pod \"horizon-769d668ff7-qx7l7\" (UID: \"2476d7cd-fb97-45d1-a67d-8fb867d8f296\") " pod="openstack/horizon-769d668ff7-qx7l7"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.523954 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2476d7cd-fb97-45d1-a67d-8fb867d8f296-logs\") pod \"horizon-769d668ff7-qx7l7\" (UID: \"2476d7cd-fb97-45d1-a67d-8fb867d8f296\") " pod="openstack/horizon-769d668ff7-qx7l7"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.545952 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-677wd\" (UniqueName: \"kubernetes.io/projected/af6830b6-96f2-487f-ba02-93c7f01d0ceb-kube-api-access-677wd\") pod \"barbican-db-sync-pzgrc\" (UID: \"af6830b6-96f2-487f-ba02-93c7f01d0ceb\") " pod="openstack/barbican-db-sync-pzgrc"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.554266 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fnwdm\" (UniqueName: \"kubernetes.io/projected/2476d7cd-fb97-45d1-a67d-8fb867d8f296-kube-api-access-fnwdm\") pod \"horizon-769d668ff7-qx7l7\" (UID: \"2476d7cd-fb97-45d1-a67d-8fb867d8f296\") " pod="openstack/horizon-769d668ff7-qx7l7"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.632611 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-769d668ff7-qx7l7"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.635524 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/af6830b6-96f2-487f-ba02-93c7f01d0ceb-db-sync-config-data\") pod \"barbican-db-sync-pzgrc\" (UID: \"af6830b6-96f2-487f-ba02-93c7f01d0ceb\") " pod="openstack/barbican-db-sync-pzgrc"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.640816 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-pmzx8"]
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.642841 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b\") " pod="openstack/glance-default-external-api-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.657612 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-pmzx8"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.670097 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-pmzx8"]
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.702815 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-pzgrc"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.760151 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6aaba4d3-75f7-4e64-83fb-95039921e50b-config\") pod \"dnsmasq-dns-56df8fb6b7-pmzx8\" (UID: \"6aaba4d3-75f7-4e64-83fb-95039921e50b\") " pod="openstack/dnsmasq-dns-56df8fb6b7-pmzx8"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.760285 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6aaba4d3-75f7-4e64-83fb-95039921e50b-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-pmzx8\" (UID: \"6aaba4d3-75f7-4e64-83fb-95039921e50b\") " pod="openstack/dnsmasq-dns-56df8fb6b7-pmzx8"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.760891 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6aaba4d3-75f7-4e64-83fb-95039921e50b-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-pmzx8\" (UID: \"6aaba4d3-75f7-4e64-83fb-95039921e50b\") " pod="openstack/dnsmasq-dns-56df8fb6b7-pmzx8"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.760942 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j6ttg\" (UniqueName: \"kubernetes.io/projected/6aaba4d3-75f7-4e64-83fb-95039921e50b-kube-api-access-j6ttg\") pod \"dnsmasq-dns-56df8fb6b7-pmzx8\" (UID: \"6aaba4d3-75f7-4e64-83fb-95039921e50b\") " pod="openstack/dnsmasq-dns-56df8fb6b7-pmzx8"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.760977 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6aaba4d3-75f7-4e64-83fb-95039921e50b-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-pmzx8\" (UID: \"6aaba4d3-75f7-4e64-83fb-95039921e50b\") " pod="openstack/dnsmasq-dns-56df8fb6b7-pmzx8"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.761005 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6aaba4d3-75f7-4e64-83fb-95039921e50b-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-pmzx8\" (UID: \"6aaba4d3-75f7-4e64-83fb-95039921e50b\") " pod="openstack/dnsmasq-dns-56df8fb6b7-pmzx8"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.785146 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.862985 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6aaba4d3-75f7-4e64-83fb-95039921e50b-config\") pod \"dnsmasq-dns-56df8fb6b7-pmzx8\" (UID: \"6aaba4d3-75f7-4e64-83fb-95039921e50b\") " pod="openstack/dnsmasq-dns-56df8fb6b7-pmzx8"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.863383 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6aaba4d3-75f7-4e64-83fb-95039921e50b-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-pmzx8\" (UID: \"6aaba4d3-75f7-4e64-83fb-95039921e50b\") " pod="openstack/dnsmasq-dns-56df8fb6b7-pmzx8"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.863450 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6aaba4d3-75f7-4e64-83fb-95039921e50b-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-pmzx8\" (UID: \"6aaba4d3-75f7-4e64-83fb-95039921e50b\") " pod="openstack/dnsmasq-dns-56df8fb6b7-pmzx8"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.863494 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j6ttg\" (UniqueName: \"kubernetes.io/projected/6aaba4d3-75f7-4e64-83fb-95039921e50b-kube-api-access-j6ttg\") pod \"dnsmasq-dns-56df8fb6b7-pmzx8\" (UID: \"6aaba4d3-75f7-4e64-83fb-95039921e50b\") " pod="openstack/dnsmasq-dns-56df8fb6b7-pmzx8"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.863530 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6aaba4d3-75f7-4e64-83fb-95039921e50b-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-pmzx8\" (UID: \"6aaba4d3-75f7-4e64-83fb-95039921e50b\") " pod="openstack/dnsmasq-dns-56df8fb6b7-pmzx8"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.863556 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6aaba4d3-75f7-4e64-83fb-95039921e50b-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-pmzx8\" (UID: \"6aaba4d3-75f7-4e64-83fb-95039921e50b\") " pod="openstack/dnsmasq-dns-56df8fb6b7-pmzx8"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.864592 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6aaba4d3-75f7-4e64-83fb-95039921e50b-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-pmzx8\" (UID: \"6aaba4d3-75f7-4e64-83fb-95039921e50b\") " pod="openstack/dnsmasq-dns-56df8fb6b7-pmzx8"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.864708 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6aaba4d3-75f7-4e64-83fb-95039921e50b-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-pmzx8\" (UID: \"6aaba4d3-75f7-4e64-83fb-95039921e50b\") " pod="openstack/dnsmasq-dns-56df8fb6b7-pmzx8"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.865230 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6aaba4d3-75f7-4e64-83fb-95039921e50b-config\") pod \"dnsmasq-dns-56df8fb6b7-pmzx8\" (UID: \"6aaba4d3-75f7-4e64-83fb-95039921e50b\") " pod="openstack/dnsmasq-dns-56df8fb6b7-pmzx8"
Dec 13 06:49:15 crc kubenswrapper[5048]: 
I1213 06:49:15.865827 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6aaba4d3-75f7-4e64-83fb-95039921e50b-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-pmzx8\" (UID: \"6aaba4d3-75f7-4e64-83fb-95039921e50b\") " pod="openstack/dnsmasq-dns-56df8fb6b7-pmzx8"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.869130 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6aaba4d3-75f7-4e64-83fb-95039921e50b-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-pmzx8\" (UID: \"6aaba4d3-75f7-4e64-83fb-95039921e50b\") " pod="openstack/dnsmasq-dns-56df8fb6b7-pmzx8"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.893662 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j6ttg\" (UniqueName: \"kubernetes.io/projected/6aaba4d3-75f7-4e64-83fb-95039921e50b-kube-api-access-j6ttg\") pod \"dnsmasq-dns-56df8fb6b7-pmzx8\" (UID: \"6aaba4d3-75f7-4e64-83fb-95039921e50b\") " pod="openstack/dnsmasq-dns-56df8fb6b7-pmzx8"
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.910556 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-8c8f677f9-25477"]
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.934462 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-9xjhb"]
Dec 13 06:49:15 crc kubenswrapper[5048]: I1213 06:49:15.956720 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b868669f-4tc86"]
Dec 13 06:49:16 crc kubenswrapper[5048]: I1213 06:49:16.069720 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-pmzx8"
Dec 13 06:49:16 crc kubenswrapper[5048]: I1213 06:49:16.119798 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-l75gl"]
Dec 13 06:49:16 crc kubenswrapper[5048]: I1213 06:49:16.216244 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 13 06:49:16 crc kubenswrapper[5048]: I1213 06:49:16.216902 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.269832 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-l75gl" event={"ID":"69610210-b3a1-4e28-b3f9-c5146e878d72","Type":"ContainerStarted","Data":"47e38f8edbefd696fa098dd014177602421ed48d4b07676b017315e8610ce667"}
Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.270866 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-9xjhb" event={"ID":"8a4a2da1-cc7f-474f-baf7-16c352bd0708","Type":"ContainerStarted","Data":"65610ced9a4b11ea57219dcbd3829663408aa05a64cf2a5a11c52f33f2321e15"}
Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.273318 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b868669f-4tc86" event={"ID":"9ebd7773-8923-4ac4-827a-c5a97ab9b3b1","Type":"ContainerStarted","Data":"769c6d9b441e87b21cb993b3e4946ed8ef88255729adf384f2b1c2f76d4e0216"}
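The two probe entries above show kubelet's HTTP liveness check against http://127.0.0.1:8798/health failing with connection refused. A standalone sketch of the same kind of check (stdlib Go, not kubelet's prober; the 1-second timeout is an assumption, and the 200-399 success range mirrors the documented HTTP probe convention):

    // probe_check.go - perform an HTTP GET the way a liveness probe would:
    // any connection error or status outside 200-399 counts as a failure.
    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    func probe(url string) error {
        client := &http.Client{Timeout: 1 * time.Second}
        resp, err := client.Get(url)
        if err != nil {
            return err // e.g. "dial tcp 127.0.0.1:8798: connect: connection refused"
        }
        defer resp.Body.Close()
        if resp.StatusCode < 200 || resp.StatusCode >= 400 {
            return fmt.Errorf("unexpected status %d", resp.StatusCode)
        }
        return nil
    }

    func main() {
        if err := probe("http://127.0.0.1:8798/health"); err != nil {
            fmt.Println("Probe failed:", err)
        } else {
            fmt.Println("Probe succeeded")
        }
    }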
event={"ID":"9ebd7773-8923-4ac4-827a-c5a97ab9b3b1","Type":"ContainerStarted","Data":"769c6d9b441e87b21cb993b3e4946ed8ef88255729adf384f2b1c2f76d4e0216"} Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.278158 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf78879c9-qzjng" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.278818 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8c8f677f9-25477" event={"ID":"9e51f251-b4ed-4920-b015-e9ac75c618b2","Type":"ContainerStarted","Data":"72907b6fb08b9fc56df2a93bad5f6f708d367676e666716753e62bf234ce9bd1"} Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.354210 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf78879c9-qzjng" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.359391 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.395525 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-769d668ff7-qx7l7"] Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.406818 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.408270 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.410312 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.415483 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-75nl7"] Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.422498 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-7ksgw"] Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.433632 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 13 06:49:17 crc kubenswrapper[5048]: W1213 06:49:16.444877 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2a87c44c_d2c7_4adf_bdbb_fae9a79b955b.slice/crio-b410c0704097d8afe05b1eaaa993c96a87517871e5f6a94b0746ece946f771a1 WatchSource:0}: Error finding container b410c0704097d8afe05b1eaaa993c96a87517871e5f6a94b0746ece946f771a1: Status 404 returned error can't find the container with id b410c0704097d8afe05b1eaaa993c96a87517871e5f6a94b0746ece946f771a1 Dec 13 06:49:17 crc kubenswrapper[5048]: W1213 06:49:16.450808 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod34ff9369_df92_416e_a391_18b362cd491f.slice/crio-87e0da901c7b05f90769adc2ad9dccdec99143859109daa9dbc6106ff58fe181 WatchSource:0}: Error finding container 87e0da901c7b05f90769adc2ad9dccdec99143859109daa9dbc6106ff58fe181: Status 404 returned error can't find the container with id 87e0da901c7b05f90769adc2ad9dccdec99143859109daa9dbc6106ff58fe181 Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.475269 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/394c78a7-046c-4cdd-89a0-42d812126386-config\") pod \"394c78a7-046c-4cdd-89a0-42d812126386\" (UID: 
\"394c78a7-046c-4cdd-89a0-42d812126386\") " Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.475522 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/394c78a7-046c-4cdd-89a0-42d812126386-ovsdbserver-nb\") pod \"394c78a7-046c-4cdd-89a0-42d812126386\" (UID: \"394c78a7-046c-4cdd-89a0-42d812126386\") " Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.475656 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9c4gk\" (UniqueName: \"kubernetes.io/projected/394c78a7-046c-4cdd-89a0-42d812126386-kube-api-access-9c4gk\") pod \"394c78a7-046c-4cdd-89a0-42d812126386\" (UID: \"394c78a7-046c-4cdd-89a0-42d812126386\") " Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.475701 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/394c78a7-046c-4cdd-89a0-42d812126386-ovsdbserver-sb\") pod \"394c78a7-046c-4cdd-89a0-42d812126386\" (UID: \"394c78a7-046c-4cdd-89a0-42d812126386\") " Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.475767 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/394c78a7-046c-4cdd-89a0-42d812126386-dns-svc\") pod \"394c78a7-046c-4cdd-89a0-42d812126386\" (UID: \"394c78a7-046c-4cdd-89a0-42d812126386\") " Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.475815 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/394c78a7-046c-4cdd-89a0-42d812126386-dns-swift-storage-0\") pod \"394c78a7-046c-4cdd-89a0-42d812126386\" (UID: \"394c78a7-046c-4cdd-89a0-42d812126386\") " Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.476022 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-74qtl\" (UniqueName: \"kubernetes.io/projected/468a6ac0-0a50-4f93-b213-0175c8b756c8-kube-api-access-74qtl\") pod \"glance-default-internal-api-0\" (UID: \"468a6ac0-0a50-4f93-b213-0175c8b756c8\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.476046 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"468a6ac0-0a50-4f93-b213-0175c8b756c8\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.476113 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/468a6ac0-0a50-4f93-b213-0175c8b756c8-scripts\") pod \"glance-default-internal-api-0\" (UID: \"468a6ac0-0a50-4f93-b213-0175c8b756c8\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.476163 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/468a6ac0-0a50-4f93-b213-0175c8b756c8-logs\") pod \"glance-default-internal-api-0\" (UID: \"468a6ac0-0a50-4f93-b213-0175c8b756c8\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.476181 5048 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/468a6ac0-0a50-4f93-b213-0175c8b756c8-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"468a6ac0-0a50-4f93-b213-0175c8b756c8\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.476267 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/468a6ac0-0a50-4f93-b213-0175c8b756c8-config-data\") pod \"glance-default-internal-api-0\" (UID: \"468a6ac0-0a50-4f93-b213-0175c8b756c8\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.476290 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/468a6ac0-0a50-4f93-b213-0175c8b756c8-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"468a6ac0-0a50-4f93-b213-0175c8b756c8\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.476663 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/394c78a7-046c-4cdd-89a0-42d812126386-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "394c78a7-046c-4cdd-89a0-42d812126386" (UID: "394c78a7-046c-4cdd-89a0-42d812126386"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.476071 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/394c78a7-046c-4cdd-89a0-42d812126386-config" (OuterVolumeSpecName: "config") pod "394c78a7-046c-4cdd-89a0-42d812126386" (UID: "394c78a7-046c-4cdd-89a0-42d812126386"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.477250 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/394c78a7-046c-4cdd-89a0-42d812126386-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "394c78a7-046c-4cdd-89a0-42d812126386" (UID: "394c78a7-046c-4cdd-89a0-42d812126386"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.477292 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/394c78a7-046c-4cdd-89a0-42d812126386-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "394c78a7-046c-4cdd-89a0-42d812126386" (UID: "394c78a7-046c-4cdd-89a0-42d812126386"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.477755 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/394c78a7-046c-4cdd-89a0-42d812126386-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "394c78a7-046c-4cdd-89a0-42d812126386" (UID: "394c78a7-046c-4cdd-89a0-42d812126386"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.484913 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/394c78a7-046c-4cdd-89a0-42d812126386-kube-api-access-9c4gk" (OuterVolumeSpecName: "kube-api-access-9c4gk") pod "394c78a7-046c-4cdd-89a0-42d812126386" (UID: "394c78a7-046c-4cdd-89a0-42d812126386"). InnerVolumeSpecName "kube-api-access-9c4gk". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.578364 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/468a6ac0-0a50-4f93-b213-0175c8b756c8-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"468a6ac0-0a50-4f93-b213-0175c8b756c8\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.578414 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/468a6ac0-0a50-4f93-b213-0175c8b756c8-config-data\") pod \"glance-default-internal-api-0\" (UID: \"468a6ac0-0a50-4f93-b213-0175c8b756c8\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.578482 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/468a6ac0-0a50-4f93-b213-0175c8b756c8-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"468a6ac0-0a50-4f93-b213-0175c8b756c8\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.578550 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-74qtl\" (UniqueName: \"kubernetes.io/projected/468a6ac0-0a50-4f93-b213-0175c8b756c8-kube-api-access-74qtl\") pod \"glance-default-internal-api-0\" (UID: \"468a6ac0-0a50-4f93-b213-0175c8b756c8\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.578578 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"468a6ac0-0a50-4f93-b213-0175c8b756c8\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.578667 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/468a6ac0-0a50-4f93-b213-0175c8b756c8-scripts\") pod \"glance-default-internal-api-0\" (UID: \"468a6ac0-0a50-4f93-b213-0175c8b756c8\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.578780 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/468a6ac0-0a50-4f93-b213-0175c8b756c8-logs\") pod \"glance-default-internal-api-0\" (UID: \"468a6ac0-0a50-4f93-b213-0175c8b756c8\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.578846 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9c4gk\" (UniqueName: \"kubernetes.io/projected/394c78a7-046c-4cdd-89a0-42d812126386-kube-api-access-9c4gk\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.578861 5048 
reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/394c78a7-046c-4cdd-89a0-42d812126386-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.578872 5048 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/394c78a7-046c-4cdd-89a0-42d812126386-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.578885 5048 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/394c78a7-046c-4cdd-89a0-42d812126386-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.578898 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/394c78a7-046c-4cdd-89a0-42d812126386-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.578908 5048 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/394c78a7-046c-4cdd-89a0-42d812126386-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.579356 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/468a6ac0-0a50-4f93-b213-0175c8b756c8-logs\") pod \"glance-default-internal-api-0\" (UID: \"468a6ac0-0a50-4f93-b213-0175c8b756c8\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.584317 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/468a6ac0-0a50-4f93-b213-0175c8b756c8-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"468a6ac0-0a50-4f93-b213-0175c8b756c8\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.584710 5048 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"468a6ac0-0a50-4f93-b213-0175c8b756c8\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-internal-api-0" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.586125 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/468a6ac0-0a50-4f93-b213-0175c8b756c8-config-data\") pod \"glance-default-internal-api-0\" (UID: \"468a6ac0-0a50-4f93-b213-0175c8b756c8\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.586427 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/468a6ac0-0a50-4f93-b213-0175c8b756c8-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"468a6ac0-0a50-4f93-b213-0175c8b756c8\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.589093 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/468a6ac0-0a50-4f93-b213-0175c8b756c8-scripts\") pod \"glance-default-internal-api-0\" (UID: \"468a6ac0-0a50-4f93-b213-0175c8b756c8\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:17 crc 
kubenswrapper[5048]: I1213 06:49:16.617054 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-74qtl\" (UniqueName: \"kubernetes.io/projected/468a6ac0-0a50-4f93-b213-0175c8b756c8-kube-api-access-74qtl\") pod \"glance-default-internal-api-0\" (UID: \"468a6ac0-0a50-4f93-b213-0175c8b756c8\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.622254 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"468a6ac0-0a50-4f93-b213-0175c8b756c8\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:16.759898 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.098710 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.128951 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-8c8f677f9-25477"] Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.176610 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-85745844cf-bqdb4"] Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.195145 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-85745844cf-bqdb4"] Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.195301 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-85745844cf-bqdb4" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.268536 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.298359 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/00a24942-715c-4fbd-bb97-1128504ef182-scripts\") pod \"horizon-85745844cf-bqdb4\" (UID: \"00a24942-715c-4fbd-bb97-1128504ef182\") " pod="openstack/horizon-85745844cf-bqdb4" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.298466 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/00a24942-715c-4fbd-bb97-1128504ef182-config-data\") pod \"horizon-85745844cf-bqdb4\" (UID: \"00a24942-715c-4fbd-bb97-1128504ef182\") " pod="openstack/horizon-85745844cf-bqdb4" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.298496 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t8k2m\" (UniqueName: \"kubernetes.io/projected/00a24942-715c-4fbd-bb97-1128504ef182-kube-api-access-t8k2m\") pod \"horizon-85745844cf-bqdb4\" (UID: \"00a24942-715c-4fbd-bb97-1128504ef182\") " pod="openstack/horizon-85745844cf-bqdb4" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.298532 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/00a24942-715c-4fbd-bb97-1128504ef182-horizon-secret-key\") pod \"horizon-85745844cf-bqdb4\" (UID: \"00a24942-715c-4fbd-bb97-1128504ef182\") " pod="openstack/horizon-85745844cf-bqdb4" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 
06:49:17.298553 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/00a24942-715c-4fbd-bb97-1128504ef182-logs\") pod \"horizon-85745844cf-bqdb4\" (UID: \"00a24942-715c-4fbd-bb97-1128504ef182\") " pod="openstack/horizon-85745844cf-bqdb4" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.343647 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-7ksgw" event={"ID":"34ff9369-df92-416e-a391-18b362cd491f","Type":"ContainerStarted","Data":"91f972f3ba413690e89f2f05548d1680b8d74a87443549898fe18c686de2c727"} Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.344030 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-7ksgw" event={"ID":"34ff9369-df92-416e-a391-18b362cd491f","Type":"ContainerStarted","Data":"87e0da901c7b05f90769adc2ad9dccdec99143859109daa9dbc6106ff58fe181"} Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.347537 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b","Type":"ContainerStarted","Data":"b410c0704097d8afe05b1eaaa993c96a87517871e5f6a94b0746ece946f771a1"} Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.351966 5048 generic.go:334] "Generic (PLEG): container finished" podID="9ebd7773-8923-4ac4-827a-c5a97ab9b3b1" containerID="063d00e880b4a0d40ea7a217f9fa0d7a781a43413cd46273d6949ca534cd339e" exitCode=0 Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.352025 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b868669f-4tc86" event={"ID":"9ebd7773-8923-4ac4-827a-c5a97ab9b3b1","Type":"ContainerDied","Data":"063d00e880b4a0d40ea7a217f9fa0d7a781a43413cd46273d6949ca534cd339e"} Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.357566 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-75nl7" event={"ID":"e351b290-4e5b-496d-94be-545f01ae8e15","Type":"ContainerStarted","Data":"c80d529b40b9df3f1eec4e8f22d881adcca7fbb134ff6a6d02fd540195206dd1"} Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.359711 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-l75gl" event={"ID":"69610210-b3a1-4e28-b3f9-c5146e878d72","Type":"ContainerStarted","Data":"05a2958c3df88430c0619626d9cc5a043efcc9342e734c40f954d0c3e70041e6"} Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.363647 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-cf78879c9-qzjng" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.364585 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-769d668ff7-qx7l7" event={"ID":"2476d7cd-fb97-45d1-a67d-8fb867d8f296","Type":"ContainerStarted","Data":"50e4f8d8be68038582635f116ea7bdf898c8cdceadd414b6df838274e8359d19"} Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.367757 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-7ksgw" podStartSLOduration=3.367745183 podStartE2EDuration="3.367745183s" podCreationTimestamp="2025-12-13 06:49:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:49:17.367037235 +0000 UTC m=+1191.233631826" watchObservedRunningTime="2025-12-13 06:49:17.367745183 +0000 UTC m=+1191.234339754" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.399490 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/00a24942-715c-4fbd-bb97-1128504ef182-config-data\") pod \"horizon-85745844cf-bqdb4\" (UID: \"00a24942-715c-4fbd-bb97-1128504ef182\") " pod="openstack/horizon-85745844cf-bqdb4" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.399532 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t8k2m\" (UniqueName: \"kubernetes.io/projected/00a24942-715c-4fbd-bb97-1128504ef182-kube-api-access-t8k2m\") pod \"horizon-85745844cf-bqdb4\" (UID: \"00a24942-715c-4fbd-bb97-1128504ef182\") " pod="openstack/horizon-85745844cf-bqdb4" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.400310 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/00a24942-715c-4fbd-bb97-1128504ef182-horizon-secret-key\") pod \"horizon-85745844cf-bqdb4\" (UID: \"00a24942-715c-4fbd-bb97-1128504ef182\") " pod="openstack/horizon-85745844cf-bqdb4" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.400982 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/00a24942-715c-4fbd-bb97-1128504ef182-logs\") pod \"horizon-85745844cf-bqdb4\" (UID: \"00a24942-715c-4fbd-bb97-1128504ef182\") " pod="openstack/horizon-85745844cf-bqdb4" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.401164 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/00a24942-715c-4fbd-bb97-1128504ef182-scripts\") pod \"horizon-85745844cf-bqdb4\" (UID: \"00a24942-715c-4fbd-bb97-1128504ef182\") " pod="openstack/horizon-85745844cf-bqdb4" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.405059 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/00a24942-715c-4fbd-bb97-1128504ef182-scripts\") pod \"horizon-85745844cf-bqdb4\" (UID: \"00a24942-715c-4fbd-bb97-1128504ef182\") " pod="openstack/horizon-85745844cf-bqdb4" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.405868 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/00a24942-715c-4fbd-bb97-1128504ef182-logs\") pod \"horizon-85745844cf-bqdb4\" (UID: \"00a24942-715c-4fbd-bb97-1128504ef182\") " pod="openstack/horizon-85745844cf-bqdb4" Dec 13 06:49:17 
crc kubenswrapper[5048]: I1213 06:49:17.405961 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/00a24942-715c-4fbd-bb97-1128504ef182-config-data\") pod \"horizon-85745844cf-bqdb4\" (UID: \"00a24942-715c-4fbd-bb97-1128504ef182\") " pod="openstack/horizon-85745844cf-bqdb4"
Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.449458 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t8k2m\" (UniqueName: \"kubernetes.io/projected/00a24942-715c-4fbd-bb97-1128504ef182-kube-api-access-t8k2m\") pod \"horizon-85745844cf-bqdb4\" (UID: \"00a24942-715c-4fbd-bb97-1128504ef182\") " pod="openstack/horizon-85745844cf-bqdb4"
Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.459308 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/00a24942-715c-4fbd-bb97-1128504ef182-horizon-secret-key\") pod \"horizon-85745844cf-bqdb4\" (UID: \"00a24942-715c-4fbd-bb97-1128504ef182\") " pod="openstack/horizon-85745844cf-bqdb4"
Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.481951 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-l75gl" podStartSLOduration=3.481925725 podStartE2EDuration="3.481925725s" podCreationTimestamp="2025-12-13 06:49:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:49:17.442659935 +0000 UTC m=+1191.309254526" watchObservedRunningTime="2025-12-13 06:49:17.481925725 +0000 UTC m=+1191.348520306"
Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.552003 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-85745844cf-bqdb4"
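The pod_startup_latency_tracker entry above reports podStartSLOduration and podStartE2EDuration per pod (here about 3.48s for keystone-bootstrap-l75gl, matching the earlier entry for neutron-db-sync-7ksgw). A small sketch (an editor's illustration assuming only the field layout visible in this log) that collects those durations from stdin and lists the slowest pods first:

    // startup_latency.go - extract "Observed pod startup duration" entries
    // and sort pods by podStartSLOduration (seconds), slowest first.
    package main

    import (
        "bufio"
        "fmt"
        "os"
        "regexp"
        "sort"
        "strconv"
    )

    var sloRE = regexp.MustCompile(`Observed pod startup duration" pod="([^"]+)" podStartSLOduration=([0-9.]+)`)

    type slo struct {
        pod string
        sec float64
    }

    func main() {
        var out []slo
        sc := bufio.NewScanner(os.Stdin)
        sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024)
        for sc.Scan() {
            if m := sloRE.FindStringSubmatch(sc.Text()); m != nil {
                if sec, err := strconv.ParseFloat(m[2], 64); err == nil {
                    out = append(out, slo{m[1], sec})
                }
            }
        }
        sort.Slice(out, func(i, j int) bool { return out[i].sec > out[j].sec })
        for _, s := range out {
            fmt.Printf("%8.3fs  %s\n", s.sec, s.pod)
        }
    }

On the entries in this excerpt, the first/lastPulling timestamps are the zero time (0001-01-01), meaning no image pull contributed to the observed durations.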
Need to start a new one" pod="openstack/horizon-85745844cf-bqdb4" Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.558890 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-pzgrc"] Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.585543 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-qzjng"] Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.602866 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-qzjng"] Dec 13 06:49:17 crc kubenswrapper[5048]: W1213 06:49:17.604316 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaf6830b6_96f2_487f_ba02_93c7f01d0ceb.slice/crio-8386a0287dd9ccbecb46b778d12bc7a3212b9a585eb7b8a15e454a0573bd7da3 WatchSource:0}: Error finding container 8386a0287dd9ccbecb46b778d12bc7a3212b9a585eb7b8a15e454a0573bd7da3: Status 404 returned error can't find the container with id 8386a0287dd9ccbecb46b778d12bc7a3212b9a585eb7b8a15e454a0573bd7da3 Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.651925 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-pmzx8"] Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.661129 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 13 06:49:17 crc kubenswrapper[5048]: W1213 06:49:17.730457 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6aaba4d3_75f7_4e64_83fb_95039921e50b.slice/crio-b74b82ddb076f848159be4b97c3bdc57054725ba9160ffd1a2b686a8fc9874df WatchSource:0}: Error finding container b74b82ddb076f848159be4b97c3bdc57054725ba9160ffd1a2b686a8fc9874df: Status 404 returned error can't find the container with id b74b82ddb076f848159be4b97c3bdc57054725ba9160ffd1a2b686a8fc9874df Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.811625 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 13 06:49:17 crc kubenswrapper[5048]: I1213 06:49:17.976862 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 13 06:49:18 crc kubenswrapper[5048]: I1213 06:49:18.120841 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b868669f-4tc86" Dec 13 06:49:18 crc kubenswrapper[5048]: I1213 06:49:18.132802 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9ebd7773-8923-4ac4-827a-c5a97ab9b3b1-dns-swift-storage-0\") pod \"9ebd7773-8923-4ac4-827a-c5a97ab9b3b1\" (UID: \"9ebd7773-8923-4ac4-827a-c5a97ab9b3b1\") " Dec 13 06:49:18 crc kubenswrapper[5048]: I1213 06:49:18.132875 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9ebd7773-8923-4ac4-827a-c5a97ab9b3b1-ovsdbserver-nb\") pod \"9ebd7773-8923-4ac4-827a-c5a97ab9b3b1\" (UID: \"9ebd7773-8923-4ac4-827a-c5a97ab9b3b1\") " Dec 13 06:49:18 crc kubenswrapper[5048]: I1213 06:49:18.132931 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ebd7773-8923-4ac4-827a-c5a97ab9b3b1-config\") pod \"9ebd7773-8923-4ac4-827a-c5a97ab9b3b1\" (UID: \"9ebd7773-8923-4ac4-827a-c5a97ab9b3b1\") " Dec 13 06:49:18 crc kubenswrapper[5048]: I1213 06:49:18.132991 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4l62m\" (UniqueName: \"kubernetes.io/projected/9ebd7773-8923-4ac4-827a-c5a97ab9b3b1-kube-api-access-4l62m\") pod \"9ebd7773-8923-4ac4-827a-c5a97ab9b3b1\" (UID: \"9ebd7773-8923-4ac4-827a-c5a97ab9b3b1\") " Dec 13 06:49:18 crc kubenswrapper[5048]: I1213 06:49:18.133056 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9ebd7773-8923-4ac4-827a-c5a97ab9b3b1-ovsdbserver-sb\") pod \"9ebd7773-8923-4ac4-827a-c5a97ab9b3b1\" (UID: \"9ebd7773-8923-4ac4-827a-c5a97ab9b3b1\") " Dec 13 06:49:18 crc kubenswrapper[5048]: I1213 06:49:18.133155 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9ebd7773-8923-4ac4-827a-c5a97ab9b3b1-dns-svc\") pod \"9ebd7773-8923-4ac4-827a-c5a97ab9b3b1\" (UID: \"9ebd7773-8923-4ac4-827a-c5a97ab9b3b1\") " Dec 13 06:49:18 crc kubenswrapper[5048]: I1213 06:49:18.154877 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ebd7773-8923-4ac4-827a-c5a97ab9b3b1-kube-api-access-4l62m" (OuterVolumeSpecName: "kube-api-access-4l62m") pod "9ebd7773-8923-4ac4-827a-c5a97ab9b3b1" (UID: "9ebd7773-8923-4ac4-827a-c5a97ab9b3b1"). InnerVolumeSpecName "kube-api-access-4l62m". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:49:18 crc kubenswrapper[5048]: I1213 06:49:18.175923 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ebd7773-8923-4ac4-827a-c5a97ab9b3b1-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "9ebd7773-8923-4ac4-827a-c5a97ab9b3b1" (UID: "9ebd7773-8923-4ac4-827a-c5a97ab9b3b1"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:49:18 crc kubenswrapper[5048]: I1213 06:49:18.187875 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ebd7773-8923-4ac4-827a-c5a97ab9b3b1-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "9ebd7773-8923-4ac4-827a-c5a97ab9b3b1" (UID: "9ebd7773-8923-4ac4-827a-c5a97ab9b3b1"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:49:18 crc kubenswrapper[5048]: I1213 06:49:18.207966 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ebd7773-8923-4ac4-827a-c5a97ab9b3b1-config" (OuterVolumeSpecName: "config") pod "9ebd7773-8923-4ac4-827a-c5a97ab9b3b1" (UID: "9ebd7773-8923-4ac4-827a-c5a97ab9b3b1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:49:18 crc kubenswrapper[5048]: I1213 06:49:18.219846 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ebd7773-8923-4ac4-827a-c5a97ab9b3b1-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "9ebd7773-8923-4ac4-827a-c5a97ab9b3b1" (UID: "9ebd7773-8923-4ac4-827a-c5a97ab9b3b1"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:49:18 crc kubenswrapper[5048]: I1213 06:49:18.230109 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ebd7773-8923-4ac4-827a-c5a97ab9b3b1-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9ebd7773-8923-4ac4-827a-c5a97ab9b3b1" (UID: "9ebd7773-8923-4ac4-827a-c5a97ab9b3b1"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:49:18 crc kubenswrapper[5048]: I1213 06:49:18.247585 5048 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9ebd7773-8923-4ac4-827a-c5a97ab9b3b1-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:18 crc kubenswrapper[5048]: I1213 06:49:18.247621 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ebd7773-8923-4ac4-827a-c5a97ab9b3b1-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:18 crc kubenswrapper[5048]: I1213 06:49:18.247633 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4l62m\" (UniqueName: \"kubernetes.io/projected/9ebd7773-8923-4ac4-827a-c5a97ab9b3b1-kube-api-access-4l62m\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:18 crc kubenswrapper[5048]: I1213 06:49:18.247643 5048 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9ebd7773-8923-4ac4-827a-c5a97ab9b3b1-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:18 crc kubenswrapper[5048]: I1213 06:49:18.247651 5048 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9ebd7773-8923-4ac4-827a-c5a97ab9b3b1-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:18 crc kubenswrapper[5048]: I1213 06:49:18.247659 5048 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9ebd7773-8923-4ac4-827a-c5a97ab9b3b1-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:18 crc kubenswrapper[5048]: I1213 06:49:18.254418 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-85745844cf-bqdb4"] Dec 13 06:49:18 crc kubenswrapper[5048]: I1213 06:49:18.392016 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b","Type":"ContainerStarted","Data":"b2f4380a8a428da29e4aee9bb5c44bcf5b871f6c9a9d5f94d7de270585d127c2"} Dec 13 06:49:18 crc kubenswrapper[5048]: I1213 06:49:18.395013 5048 generic.go:334] "Generic (PLEG): container finished" 
podID="6aaba4d3-75f7-4e64-83fb-95039921e50b" containerID="67d8f9858481513d0a5bc6370ee0ed3b40ea58ab3bdd16cd4b01391f5143e77a" exitCode=0 Dec 13 06:49:18 crc kubenswrapper[5048]: I1213 06:49:18.395079 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-pmzx8" event={"ID":"6aaba4d3-75f7-4e64-83fb-95039921e50b","Type":"ContainerDied","Data":"67d8f9858481513d0a5bc6370ee0ed3b40ea58ab3bdd16cd4b01391f5143e77a"} Dec 13 06:49:18 crc kubenswrapper[5048]: I1213 06:49:18.395103 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-pmzx8" event={"ID":"6aaba4d3-75f7-4e64-83fb-95039921e50b","Type":"ContainerStarted","Data":"b74b82ddb076f848159be4b97c3bdc57054725ba9160ffd1a2b686a8fc9874df"} Dec 13 06:49:18 crc kubenswrapper[5048]: I1213 06:49:18.438777 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-pzgrc" event={"ID":"af6830b6-96f2-487f-ba02-93c7f01d0ceb","Type":"ContainerStarted","Data":"8386a0287dd9ccbecb46b778d12bc7a3212b9a585eb7b8a15e454a0573bd7da3"} Dec 13 06:49:18 crc kubenswrapper[5048]: I1213 06:49:18.480541 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"468a6ac0-0a50-4f93-b213-0175c8b756c8","Type":"ContainerStarted","Data":"b0408d10c356c3e6e3e162d8e4f14f2f2066bd2af538fbeeb7b8b6296fe1ea3a"} Dec 13 06:49:18 crc kubenswrapper[5048]: I1213 06:49:18.501490 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b868669f-4tc86" event={"ID":"9ebd7773-8923-4ac4-827a-c5a97ab9b3b1","Type":"ContainerDied","Data":"769c6d9b441e87b21cb993b3e4946ed8ef88255729adf384f2b1c2f76d4e0216"} Dec 13 06:49:18 crc kubenswrapper[5048]: I1213 06:49:18.501838 5048 scope.go:117] "RemoveContainer" containerID="063d00e880b4a0d40ea7a217f9fa0d7a781a43413cd46273d6949ca534cd339e" Dec 13 06:49:18 crc kubenswrapper[5048]: I1213 06:49:18.501964 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b868669f-4tc86" Dec 13 06:49:18 crc kubenswrapper[5048]: I1213 06:49:18.513757 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-85745844cf-bqdb4" event={"ID":"00a24942-715c-4fbd-bb97-1128504ef182","Type":"ContainerStarted","Data":"5ebd77429989d64f58f346bd3daf343dd4956e5b232f913cfd25efb52932a358"} Dec 13 06:49:18 crc kubenswrapper[5048]: I1213 06:49:18.611023 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="394c78a7-046c-4cdd-89a0-42d812126386" path="/var/lib/kubelet/pods/394c78a7-046c-4cdd-89a0-42d812126386/volumes" Dec 13 06:49:18 crc kubenswrapper[5048]: I1213 06:49:18.675085 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b868669f-4tc86"] Dec 13 06:49:18 crc kubenswrapper[5048]: I1213 06:49:18.688130 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5b868669f-4tc86"] Dec 13 06:49:20 crc kubenswrapper[5048]: I1213 06:49:20.543181 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"468a6ac0-0a50-4f93-b213-0175c8b756c8","Type":"ContainerStarted","Data":"9a118aa7c8475d5f77b0fe6d135f228d5c2b9bd4156b3d715d9a1fbe9b11231a"} Dec 13 06:49:20 crc kubenswrapper[5048]: I1213 06:49:20.546276 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b","Type":"ContainerStarted","Data":"2f4de111509142f7f86ffbd90cf48a33b61f59a9ab2c14cf35dec3067eed7529"} Dec 13 06:49:20 crc kubenswrapper[5048]: I1213 06:49:20.549233 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-pmzx8" event={"ID":"6aaba4d3-75f7-4e64-83fb-95039921e50b","Type":"ContainerStarted","Data":"4180f53c8df8d329aec89da7fe89a5ae17927f4b50bc304d8b7f4c2017c56471"} Dec 13 06:49:20 crc kubenswrapper[5048]: I1213 06:49:20.550210 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-56df8fb6b7-pmzx8" Dec 13 06:49:20 crc kubenswrapper[5048]: I1213 06:49:20.586850 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9ebd7773-8923-4ac4-827a-c5a97ab9b3b1" path="/var/lib/kubelet/pods/9ebd7773-8923-4ac4-827a-c5a97ab9b3b1/volumes" Dec 13 06:49:22 crc kubenswrapper[5048]: I1213 06:49:22.570022 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="33ee9f9c-b391-48fe-80f1-7d078b8e7b5b" containerName="glance-log" containerID="cri-o://2f4de111509142f7f86ffbd90cf48a33b61f59a9ab2c14cf35dec3067eed7529" gracePeriod=30 Dec 13 06:49:22 crc kubenswrapper[5048]: I1213 06:49:22.570114 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="33ee9f9c-b391-48fe-80f1-7d078b8e7b5b" containerName="glance-httpd" containerID="cri-o://7c2b47110a25f9e5409299bde9b79667ce0a70c556df346e5228288cc3d906cc" gracePeriod=30 Dec 13 06:49:22 crc kubenswrapper[5048]: I1213 06:49:22.572309 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="468a6ac0-0a50-4f93-b213-0175c8b756c8" containerName="glance-log" containerID="cri-o://9a118aa7c8475d5f77b0fe6d135f228d5c2b9bd4156b3d715d9a1fbe9b11231a" gracePeriod=30 Dec 13 06:49:22 crc kubenswrapper[5048]: I1213 06:49:22.572383 5048 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/glance-default-internal-api-0" podUID="468a6ac0-0a50-4f93-b213-0175c8b756c8" containerName="glance-httpd" containerID="cri-o://3fa964f8f4ff55b5940f2d0d3446730b9defe0491e61ee9dbb0b557d14a262c5" gracePeriod=30 Dec 13 06:49:22 crc kubenswrapper[5048]: I1213 06:49:22.578091 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b","Type":"ContainerStarted","Data":"7c2b47110a25f9e5409299bde9b79667ce0a70c556df346e5228288cc3d906cc"} Dec 13 06:49:22 crc kubenswrapper[5048]: I1213 06:49:22.578321 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"468a6ac0-0a50-4f93-b213-0175c8b756c8","Type":"ContainerStarted","Data":"3fa964f8f4ff55b5940f2d0d3446730b9defe0491e61ee9dbb0b557d14a262c5"} Dec 13 06:49:22 crc kubenswrapper[5048]: I1213 06:49:22.604965 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=7.604948504 podStartE2EDuration="7.604948504s" podCreationTimestamp="2025-12-13 06:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:49:22.604559773 +0000 UTC m=+1196.471154374" watchObservedRunningTime="2025-12-13 06:49:22.604948504 +0000 UTC m=+1196.471543085" Dec 13 06:49:22 crc kubenswrapper[5048]: I1213 06:49:22.606103 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-56df8fb6b7-pmzx8" podStartSLOduration=7.606095995 podStartE2EDuration="7.606095995s" podCreationTimestamp="2025-12-13 06:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:49:20.577024674 +0000 UTC m=+1194.443619255" watchObservedRunningTime="2025-12-13 06:49:22.606095995 +0000 UTC m=+1196.472690576" Dec 13 06:49:22 crc kubenswrapper[5048]: I1213 06:49:22.637734 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=7.637714697 podStartE2EDuration="7.637714697s" podCreationTimestamp="2025-12-13 06:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:49:22.6341816 +0000 UTC m=+1196.500776211" watchObservedRunningTime="2025-12-13 06:49:22.637714697 +0000 UTC m=+1196.504309298" Dec 13 06:49:22 crc kubenswrapper[5048]: E1213 06:49:22.684936 5048 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod33ee9f9c_b391_48fe_80f1_7d078b8e7b5b.slice/crio-2f4de111509142f7f86ffbd90cf48a33b61f59a9ab2c14cf35dec3067eed7529.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod468a6ac0_0a50_4f93_b213_0175c8b756c8.slice/crio-9a118aa7c8475d5f77b0fe6d135f228d5c2b9bd4156b3d715d9a1fbe9b11231a.scope\": RecentStats: unable to find data in memory cache]" Dec 13 06:49:23 crc kubenswrapper[5048]: I1213 06:49:23.582744 5048 generic.go:334] "Generic (PLEG): container finished" podID="468a6ac0-0a50-4f93-b213-0175c8b756c8" containerID="3fa964f8f4ff55b5940f2d0d3446730b9defe0491e61ee9dbb0b557d14a262c5" exitCode=0 Dec 13 06:49:23 crc kubenswrapper[5048]: I1213 06:49:23.583084 5048 
generic.go:334] "Generic (PLEG): container finished" podID="468a6ac0-0a50-4f93-b213-0175c8b756c8" containerID="9a118aa7c8475d5f77b0fe6d135f228d5c2b9bd4156b3d715d9a1fbe9b11231a" exitCode=143 Dec 13 06:49:23 crc kubenswrapper[5048]: I1213 06:49:23.582830 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"468a6ac0-0a50-4f93-b213-0175c8b756c8","Type":"ContainerDied","Data":"3fa964f8f4ff55b5940f2d0d3446730b9defe0491e61ee9dbb0b557d14a262c5"} Dec 13 06:49:23 crc kubenswrapper[5048]: I1213 06:49:23.583148 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"468a6ac0-0a50-4f93-b213-0175c8b756c8","Type":"ContainerDied","Data":"9a118aa7c8475d5f77b0fe6d135f228d5c2b9bd4156b3d715d9a1fbe9b11231a"} Dec 13 06:49:23 crc kubenswrapper[5048]: I1213 06:49:23.587873 5048 generic.go:334] "Generic (PLEG): container finished" podID="69610210-b3a1-4e28-b3f9-c5146e878d72" containerID="05a2958c3df88430c0619626d9cc5a043efcc9342e734c40f954d0c3e70041e6" exitCode=0 Dec 13 06:49:23 crc kubenswrapper[5048]: I1213 06:49:23.587972 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-l75gl" event={"ID":"69610210-b3a1-4e28-b3f9-c5146e878d72","Type":"ContainerDied","Data":"05a2958c3df88430c0619626d9cc5a043efcc9342e734c40f954d0c3e70041e6"} Dec 13 06:49:23 crc kubenswrapper[5048]: I1213 06:49:23.600191 5048 generic.go:334] "Generic (PLEG): container finished" podID="33ee9f9c-b391-48fe-80f1-7d078b8e7b5b" containerID="7c2b47110a25f9e5409299bde9b79667ce0a70c556df346e5228288cc3d906cc" exitCode=0 Dec 13 06:49:23 crc kubenswrapper[5048]: I1213 06:49:23.600226 5048 generic.go:334] "Generic (PLEG): container finished" podID="33ee9f9c-b391-48fe-80f1-7d078b8e7b5b" containerID="2f4de111509142f7f86ffbd90cf48a33b61f59a9ab2c14cf35dec3067eed7529" exitCode=143 Dec 13 06:49:23 crc kubenswrapper[5048]: I1213 06:49:23.600253 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b","Type":"ContainerDied","Data":"7c2b47110a25f9e5409299bde9b79667ce0a70c556df346e5228288cc3d906cc"} Dec 13 06:49:23 crc kubenswrapper[5048]: I1213 06:49:23.600280 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b","Type":"ContainerDied","Data":"2f4de111509142f7f86ffbd90cf48a33b61f59a9ab2c14cf35dec3067eed7529"} Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.072607 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-56df8fb6b7-pmzx8" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.150060 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-2kjcf"] Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.150341 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5c79d794d7-2kjcf" podUID="b53097b1-3126-4e17-b910-d2b7f57ec87e" containerName="dnsmasq-dns" containerID="cri-o://6a3d15e2b73d49faf7e2bc2dde00bdf8e7146833acddabd1d2461771ec1f3cd7" gracePeriod=10 Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.414617 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-769d668ff7-qx7l7"] Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.440753 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-5fdc45567b-kg45h"] Dec 13 
06:49:26 crc kubenswrapper[5048]: E1213 06:49:26.446404 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ebd7773-8923-4ac4-827a-c5a97ab9b3b1" containerName="init" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.446464 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ebd7773-8923-4ac4-827a-c5a97ab9b3b1" containerName="init" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.446908 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ebd7773-8923-4ac4-827a-c5a97ab9b3b1" containerName="init" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.448921 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5fdc45567b-kg45h" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.452201 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.474992 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5fdc45567b-kg45h"] Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.549545 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3c13197f-14c3-45a9-ba9c-bc89b80d6169-horizon-secret-key\") pod \"horizon-5fdc45567b-kg45h\" (UID: \"3c13197f-14c3-45a9-ba9c-bc89b80d6169\") " pod="openstack/horizon-5fdc45567b-kg45h" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.549601 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c13197f-14c3-45a9-ba9c-bc89b80d6169-horizon-tls-certs\") pod \"horizon-5fdc45567b-kg45h\" (UID: \"3c13197f-14c3-45a9-ba9c-bc89b80d6169\") " pod="openstack/horizon-5fdc45567b-kg45h" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.549641 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c13197f-14c3-45a9-ba9c-bc89b80d6169-combined-ca-bundle\") pod \"horizon-5fdc45567b-kg45h\" (UID: \"3c13197f-14c3-45a9-ba9c-bc89b80d6169\") " pod="openstack/horizon-5fdc45567b-kg45h" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.549672 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3c13197f-14c3-45a9-ba9c-bc89b80d6169-config-data\") pod \"horizon-5fdc45567b-kg45h\" (UID: \"3c13197f-14c3-45a9-ba9c-bc89b80d6169\") " pod="openstack/horizon-5fdc45567b-kg45h" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.549709 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3c13197f-14c3-45a9-ba9c-bc89b80d6169-logs\") pod \"horizon-5fdc45567b-kg45h\" (UID: \"3c13197f-14c3-45a9-ba9c-bc89b80d6169\") " pod="openstack/horizon-5fdc45567b-kg45h" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.549734 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n82nj\" (UniqueName: \"kubernetes.io/projected/3c13197f-14c3-45a9-ba9c-bc89b80d6169-kube-api-access-n82nj\") pod \"horizon-5fdc45567b-kg45h\" (UID: \"3c13197f-14c3-45a9-ba9c-bc89b80d6169\") " pod="openstack/horizon-5fdc45567b-kg45h" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.549768 5048 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3c13197f-14c3-45a9-ba9c-bc89b80d6169-scripts\") pod \"horizon-5fdc45567b-kg45h\" (UID: \"3c13197f-14c3-45a9-ba9c-bc89b80d6169\") " pod="openstack/horizon-5fdc45567b-kg45h" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.567270 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-85745844cf-bqdb4"] Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.603150 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-657fc95f76-vznd4"] Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.604670 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-657fc95f76-vznd4" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.619501 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-657fc95f76-vznd4"] Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.651546 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3c13197f-14c3-45a9-ba9c-bc89b80d6169-logs\") pod \"horizon-5fdc45567b-kg45h\" (UID: \"3c13197f-14c3-45a9-ba9c-bc89b80d6169\") " pod="openstack/horizon-5fdc45567b-kg45h" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.651598 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n82nj\" (UniqueName: \"kubernetes.io/projected/3c13197f-14c3-45a9-ba9c-bc89b80d6169-kube-api-access-n82nj\") pod \"horizon-5fdc45567b-kg45h\" (UID: \"3c13197f-14c3-45a9-ba9c-bc89b80d6169\") " pod="openstack/horizon-5fdc45567b-kg45h" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.651628 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3c13197f-14c3-45a9-ba9c-bc89b80d6169-scripts\") pod \"horizon-5fdc45567b-kg45h\" (UID: \"3c13197f-14c3-45a9-ba9c-bc89b80d6169\") " pod="openstack/horizon-5fdc45567b-kg45h" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.651681 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3c13197f-14c3-45a9-ba9c-bc89b80d6169-horizon-secret-key\") pod \"horizon-5fdc45567b-kg45h\" (UID: \"3c13197f-14c3-45a9-ba9c-bc89b80d6169\") " pod="openstack/horizon-5fdc45567b-kg45h" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.651713 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c13197f-14c3-45a9-ba9c-bc89b80d6169-horizon-tls-certs\") pod \"horizon-5fdc45567b-kg45h\" (UID: \"3c13197f-14c3-45a9-ba9c-bc89b80d6169\") " pod="openstack/horizon-5fdc45567b-kg45h" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.651749 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c13197f-14c3-45a9-ba9c-bc89b80d6169-combined-ca-bundle\") pod \"horizon-5fdc45567b-kg45h\" (UID: \"3c13197f-14c3-45a9-ba9c-bc89b80d6169\") " pod="openstack/horizon-5fdc45567b-kg45h" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.651776 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3c13197f-14c3-45a9-ba9c-bc89b80d6169-config-data\") pod \"horizon-5fdc45567b-kg45h\" (UID: 
\"3c13197f-14c3-45a9-ba9c-bc89b80d6169\") " pod="openstack/horizon-5fdc45567b-kg45h" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.654394 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3c13197f-14c3-45a9-ba9c-bc89b80d6169-logs\") pod \"horizon-5fdc45567b-kg45h\" (UID: \"3c13197f-14c3-45a9-ba9c-bc89b80d6169\") " pod="openstack/horizon-5fdc45567b-kg45h" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.654412 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.655487 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3c13197f-14c3-45a9-ba9c-bc89b80d6169-config-data\") pod \"horizon-5fdc45567b-kg45h\" (UID: \"3c13197f-14c3-45a9-ba9c-bc89b80d6169\") " pod="openstack/horizon-5fdc45567b-kg45h" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.656004 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3c13197f-14c3-45a9-ba9c-bc89b80d6169-scripts\") pod \"horizon-5fdc45567b-kg45h\" (UID: \"3c13197f-14c3-45a9-ba9c-bc89b80d6169\") " pod="openstack/horizon-5fdc45567b-kg45h" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.664614 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c13197f-14c3-45a9-ba9c-bc89b80d6169-combined-ca-bundle\") pod \"horizon-5fdc45567b-kg45h\" (UID: \"3c13197f-14c3-45a9-ba9c-bc89b80d6169\") " pod="openstack/horizon-5fdc45567b-kg45h" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.678830 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3c13197f-14c3-45a9-ba9c-bc89b80d6169-horizon-secret-key\") pod \"horizon-5fdc45567b-kg45h\" (UID: \"3c13197f-14c3-45a9-ba9c-bc89b80d6169\") " pod="openstack/horizon-5fdc45567b-kg45h" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.680409 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c13197f-14c3-45a9-ba9c-bc89b80d6169-horizon-tls-certs\") pod \"horizon-5fdc45567b-kg45h\" (UID: \"3c13197f-14c3-45a9-ba9c-bc89b80d6169\") " pod="openstack/horizon-5fdc45567b-kg45h" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.682017 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n82nj\" (UniqueName: \"kubernetes.io/projected/3c13197f-14c3-45a9-ba9c-bc89b80d6169-kube-api-access-n82nj\") pod \"horizon-5fdc45567b-kg45h\" (UID: \"3c13197f-14c3-45a9-ba9c-bc89b80d6169\") " pod="openstack/horizon-5fdc45567b-kg45h" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.752921 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1a49463c-d974-4631-b6ef-3f88d734ac2d-config-data\") pod \"horizon-657fc95f76-vznd4\" (UID: \"1a49463c-d974-4631-b6ef-3f88d734ac2d\") " pod="openstack/horizon-657fc95f76-vznd4" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.753086 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1a49463c-d974-4631-b6ef-3f88d734ac2d-logs\") pod \"horizon-657fc95f76-vznd4\" (UID: 
\"1a49463c-d974-4631-b6ef-3f88d734ac2d\") " pod="openstack/horizon-657fc95f76-vznd4" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.753116 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a49463c-d974-4631-b6ef-3f88d734ac2d-combined-ca-bundle\") pod \"horizon-657fc95f76-vznd4\" (UID: \"1a49463c-d974-4631-b6ef-3f88d734ac2d\") " pod="openstack/horizon-657fc95f76-vznd4" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.753154 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rl9jj\" (UniqueName: \"kubernetes.io/projected/1a49463c-d974-4631-b6ef-3f88d734ac2d-kube-api-access-rl9jj\") pod \"horizon-657fc95f76-vznd4\" (UID: \"1a49463c-d974-4631-b6ef-3f88d734ac2d\") " pod="openstack/horizon-657fc95f76-vznd4" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.753246 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a49463c-d974-4631-b6ef-3f88d734ac2d-horizon-tls-certs\") pod \"horizon-657fc95f76-vznd4\" (UID: \"1a49463c-d974-4631-b6ef-3f88d734ac2d\") " pod="openstack/horizon-657fc95f76-vznd4" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.753308 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/1a49463c-d974-4631-b6ef-3f88d734ac2d-horizon-secret-key\") pod \"horizon-657fc95f76-vznd4\" (UID: \"1a49463c-d974-4631-b6ef-3f88d734ac2d\") " pod="openstack/horizon-657fc95f76-vznd4" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.753328 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1a49463c-d974-4631-b6ef-3f88d734ac2d-scripts\") pod \"horizon-657fc95f76-vznd4\" (UID: \"1a49463c-d974-4631-b6ef-3f88d734ac2d\") " pod="openstack/horizon-657fc95f76-vznd4" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.791496 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5fdc45567b-kg45h" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.855007 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rl9jj\" (UniqueName: \"kubernetes.io/projected/1a49463c-d974-4631-b6ef-3f88d734ac2d-kube-api-access-rl9jj\") pod \"horizon-657fc95f76-vznd4\" (UID: \"1a49463c-d974-4631-b6ef-3f88d734ac2d\") " pod="openstack/horizon-657fc95f76-vznd4" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.855072 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a49463c-d974-4631-b6ef-3f88d734ac2d-horizon-tls-certs\") pod \"horizon-657fc95f76-vznd4\" (UID: \"1a49463c-d974-4631-b6ef-3f88d734ac2d\") " pod="openstack/horizon-657fc95f76-vznd4" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.855108 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/1a49463c-d974-4631-b6ef-3f88d734ac2d-horizon-secret-key\") pod \"horizon-657fc95f76-vznd4\" (UID: \"1a49463c-d974-4631-b6ef-3f88d734ac2d\") " pod="openstack/horizon-657fc95f76-vznd4" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.855128 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1a49463c-d974-4631-b6ef-3f88d734ac2d-scripts\") pod \"horizon-657fc95f76-vznd4\" (UID: \"1a49463c-d974-4631-b6ef-3f88d734ac2d\") " pod="openstack/horizon-657fc95f76-vznd4" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.855149 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1a49463c-d974-4631-b6ef-3f88d734ac2d-config-data\") pod \"horizon-657fc95f76-vznd4\" (UID: \"1a49463c-d974-4631-b6ef-3f88d734ac2d\") " pod="openstack/horizon-657fc95f76-vznd4" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.855201 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1a49463c-d974-4631-b6ef-3f88d734ac2d-logs\") pod \"horizon-657fc95f76-vznd4\" (UID: \"1a49463c-d974-4631-b6ef-3f88d734ac2d\") " pod="openstack/horizon-657fc95f76-vznd4" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.855227 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a49463c-d974-4631-b6ef-3f88d734ac2d-combined-ca-bundle\") pod \"horizon-657fc95f76-vznd4\" (UID: \"1a49463c-d974-4631-b6ef-3f88d734ac2d\") " pod="openstack/horizon-657fc95f76-vznd4" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.856195 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1a49463c-d974-4631-b6ef-3f88d734ac2d-logs\") pod \"horizon-657fc95f76-vznd4\" (UID: \"1a49463c-d974-4631-b6ef-3f88d734ac2d\") " pod="openstack/horizon-657fc95f76-vznd4" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.856552 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1a49463c-d974-4631-b6ef-3f88d734ac2d-scripts\") pod \"horizon-657fc95f76-vznd4\" (UID: \"1a49463c-d974-4631-b6ef-3f88d734ac2d\") " pod="openstack/horizon-657fc95f76-vznd4" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.857888 5048 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1a49463c-d974-4631-b6ef-3f88d734ac2d-config-data\") pod \"horizon-657fc95f76-vznd4\" (UID: \"1a49463c-d974-4631-b6ef-3f88d734ac2d\") " pod="openstack/horizon-657fc95f76-vznd4" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.858968 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/1a49463c-d974-4631-b6ef-3f88d734ac2d-horizon-secret-key\") pod \"horizon-657fc95f76-vznd4\" (UID: \"1a49463c-d974-4631-b6ef-3f88d734ac2d\") " pod="openstack/horizon-657fc95f76-vznd4" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.859074 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a49463c-d974-4631-b6ef-3f88d734ac2d-horizon-tls-certs\") pod \"horizon-657fc95f76-vznd4\" (UID: \"1a49463c-d974-4631-b6ef-3f88d734ac2d\") " pod="openstack/horizon-657fc95f76-vznd4" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.863357 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a49463c-d974-4631-b6ef-3f88d734ac2d-combined-ca-bundle\") pod \"horizon-657fc95f76-vznd4\" (UID: \"1a49463c-d974-4631-b6ef-3f88d734ac2d\") " pod="openstack/horizon-657fc95f76-vznd4" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.870975 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rl9jj\" (UniqueName: \"kubernetes.io/projected/1a49463c-d974-4631-b6ef-3f88d734ac2d-kube-api-access-rl9jj\") pod \"horizon-657fc95f76-vznd4\" (UID: \"1a49463c-d974-4631-b6ef-3f88d734ac2d\") " pod="openstack/horizon-657fc95f76-vznd4" Dec 13 06:49:26 crc kubenswrapper[5048]: I1213 06:49:26.944551 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-657fc95f76-vznd4" Dec 13 06:49:27 crc kubenswrapper[5048]: I1213 06:49:27.646970 5048 generic.go:334] "Generic (PLEG): container finished" podID="b53097b1-3126-4e17-b910-d2b7f57ec87e" containerID="6a3d15e2b73d49faf7e2bc2dde00bdf8e7146833acddabd1d2461771ec1f3cd7" exitCode=0 Dec 13 06:49:27 crc kubenswrapper[5048]: I1213 06:49:27.647011 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-2kjcf" event={"ID":"b53097b1-3126-4e17-b910-d2b7f57ec87e","Type":"ContainerDied","Data":"6a3d15e2b73d49faf7e2bc2dde00bdf8e7146833acddabd1d2461771ec1f3cd7"} Dec 13 06:49:29 crc kubenswrapper[5048]: I1213 06:49:29.297836 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-l75gl" Dec 13 06:49:29 crc kubenswrapper[5048]: I1213 06:49:29.429419 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/69610210-b3a1-4e28-b3f9-c5146e878d72-credential-keys\") pod \"69610210-b3a1-4e28-b3f9-c5146e878d72\" (UID: \"69610210-b3a1-4e28-b3f9-c5146e878d72\") " Dec 13 06:49:29 crc kubenswrapper[5048]: I1213 06:49:29.429593 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/69610210-b3a1-4e28-b3f9-c5146e878d72-fernet-keys\") pod \"69610210-b3a1-4e28-b3f9-c5146e878d72\" (UID: \"69610210-b3a1-4e28-b3f9-c5146e878d72\") " Dec 13 06:49:29 crc kubenswrapper[5048]: I1213 06:49:29.429620 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69610210-b3a1-4e28-b3f9-c5146e878d72-config-data\") pod \"69610210-b3a1-4e28-b3f9-c5146e878d72\" (UID: \"69610210-b3a1-4e28-b3f9-c5146e878d72\") " Dec 13 06:49:29 crc kubenswrapper[5048]: I1213 06:49:29.429674 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/69610210-b3a1-4e28-b3f9-c5146e878d72-scripts\") pod \"69610210-b3a1-4e28-b3f9-c5146e878d72\" (UID: \"69610210-b3a1-4e28-b3f9-c5146e878d72\") " Dec 13 06:49:29 crc kubenswrapper[5048]: I1213 06:49:29.429712 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69610210-b3a1-4e28-b3f9-c5146e878d72-combined-ca-bundle\") pod \"69610210-b3a1-4e28-b3f9-c5146e878d72\" (UID: \"69610210-b3a1-4e28-b3f9-c5146e878d72\") " Dec 13 06:49:29 crc kubenswrapper[5048]: I1213 06:49:29.429780 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz6lb\" (UniqueName: \"kubernetes.io/projected/69610210-b3a1-4e28-b3f9-c5146e878d72-kube-api-access-lz6lb\") pod \"69610210-b3a1-4e28-b3f9-c5146e878d72\" (UID: \"69610210-b3a1-4e28-b3f9-c5146e878d72\") " Dec 13 06:49:29 crc kubenswrapper[5048]: I1213 06:49:29.435602 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69610210-b3a1-4e28-b3f9-c5146e878d72-scripts" (OuterVolumeSpecName: "scripts") pod "69610210-b3a1-4e28-b3f9-c5146e878d72" (UID: "69610210-b3a1-4e28-b3f9-c5146e878d72"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:49:29 crc kubenswrapper[5048]: I1213 06:49:29.435694 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69610210-b3a1-4e28-b3f9-c5146e878d72-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "69610210-b3a1-4e28-b3f9-c5146e878d72" (UID: "69610210-b3a1-4e28-b3f9-c5146e878d72"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:49:29 crc kubenswrapper[5048]: I1213 06:49:29.445066 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69610210-b3a1-4e28-b3f9-c5146e878d72-kube-api-access-lz6lb" (OuterVolumeSpecName: "kube-api-access-lz6lb") pod "69610210-b3a1-4e28-b3f9-c5146e878d72" (UID: "69610210-b3a1-4e28-b3f9-c5146e878d72"). InnerVolumeSpecName "kube-api-access-lz6lb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:49:29 crc kubenswrapper[5048]: I1213 06:49:29.452701 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69610210-b3a1-4e28-b3f9-c5146e878d72-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "69610210-b3a1-4e28-b3f9-c5146e878d72" (UID: "69610210-b3a1-4e28-b3f9-c5146e878d72"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:49:29 crc kubenswrapper[5048]: I1213 06:49:29.459989 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69610210-b3a1-4e28-b3f9-c5146e878d72-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "69610210-b3a1-4e28-b3f9-c5146e878d72" (UID: "69610210-b3a1-4e28-b3f9-c5146e878d72"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:49:29 crc kubenswrapper[5048]: I1213 06:49:29.460391 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69610210-b3a1-4e28-b3f9-c5146e878d72-config-data" (OuterVolumeSpecName: "config-data") pod "69610210-b3a1-4e28-b3f9-c5146e878d72" (UID: "69610210-b3a1-4e28-b3f9-c5146e878d72"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:49:29 crc kubenswrapper[5048]: I1213 06:49:29.531531 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz6lb\" (UniqueName: \"kubernetes.io/projected/69610210-b3a1-4e28-b3f9-c5146e878d72-kube-api-access-lz6lb\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:29 crc kubenswrapper[5048]: I1213 06:49:29.531585 5048 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/69610210-b3a1-4e28-b3f9-c5146e878d72-credential-keys\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:29 crc kubenswrapper[5048]: I1213 06:49:29.531645 5048 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/69610210-b3a1-4e28-b3f9-c5146e878d72-fernet-keys\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:29 crc kubenswrapper[5048]: I1213 06:49:29.531661 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69610210-b3a1-4e28-b3f9-c5146e878d72-config-data\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:29 crc kubenswrapper[5048]: I1213 06:49:29.531677 5048 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/69610210-b3a1-4e28-b3f9-c5146e878d72-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:29 crc kubenswrapper[5048]: I1213 06:49:29.531730 5048 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69610210-b3a1-4e28-b3f9-c5146e878d72-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:29 crc kubenswrapper[5048]: I1213 06:49:29.663822 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-l75gl" event={"ID":"69610210-b3a1-4e28-b3f9-c5146e878d72","Type":"ContainerDied","Data":"47e38f8edbefd696fa098dd014177602421ed48d4b07676b017315e8610ce667"} Dec 13 06:49:29 crc kubenswrapper[5048]: I1213 06:49:29.663862 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="47e38f8edbefd696fa098dd014177602421ed48d4b07676b017315e8610ce667" Dec 13 06:49:29 crc kubenswrapper[5048]: I1213 06:49:29.663875 5048 util.go:48] 
"No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-l75gl" Dec 13 06:49:30 crc kubenswrapper[5048]: I1213 06:49:30.386568 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-l75gl"] Dec 13 06:49:30 crc kubenswrapper[5048]: I1213 06:49:30.395344 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-l75gl"] Dec 13 06:49:30 crc kubenswrapper[5048]: I1213 06:49:30.474931 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-jzn5h"] Dec 13 06:49:30 crc kubenswrapper[5048]: E1213 06:49:30.475411 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69610210-b3a1-4e28-b3f9-c5146e878d72" containerName="keystone-bootstrap" Dec 13 06:49:30 crc kubenswrapper[5048]: I1213 06:49:30.475447 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="69610210-b3a1-4e28-b3f9-c5146e878d72" containerName="keystone-bootstrap" Dec 13 06:49:30 crc kubenswrapper[5048]: I1213 06:49:30.475690 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="69610210-b3a1-4e28-b3f9-c5146e878d72" containerName="keystone-bootstrap" Dec 13 06:49:30 crc kubenswrapper[5048]: I1213 06:49:30.476316 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-jzn5h" Dec 13 06:49:30 crc kubenswrapper[5048]: I1213 06:49:30.478639 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Dec 13 06:49:30 crc kubenswrapper[5048]: I1213 06:49:30.478673 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-scw59" Dec 13 06:49:30 crc kubenswrapper[5048]: I1213 06:49:30.478642 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Dec 13 06:49:30 crc kubenswrapper[5048]: I1213 06:49:30.478807 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Dec 13 06:49:30 crc kubenswrapper[5048]: I1213 06:49:30.482281 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Dec 13 06:49:30 crc kubenswrapper[5048]: I1213 06:49:30.482287 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-jzn5h"] Dec 13 06:49:30 crc kubenswrapper[5048]: I1213 06:49:30.537809 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5c79d794d7-2kjcf" podUID="b53097b1-3126-4e17-b910-d2b7f57ec87e" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.123:5353: connect: connection refused" Dec 13 06:49:30 crc kubenswrapper[5048]: I1213 06:49:30.577837 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="69610210-b3a1-4e28-b3f9-c5146e878d72" path="/var/lib/kubelet/pods/69610210-b3a1-4e28-b3f9-c5146e878d72/volumes" Dec 13 06:49:30 crc kubenswrapper[5048]: I1213 06:49:30.652318 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d92d376-9d6c-404a-b1f5-a13a67276b6f-config-data\") pod \"keystone-bootstrap-jzn5h\" (UID: \"2d92d376-9d6c-404a-b1f5-a13a67276b6f\") " pod="openstack/keystone-bootstrap-jzn5h" Dec 13 06:49:30 crc kubenswrapper[5048]: I1213 06:49:30.652388 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: 
\"kubernetes.io/secret/2d92d376-9d6c-404a-b1f5-a13a67276b6f-fernet-keys\") pod \"keystone-bootstrap-jzn5h\" (UID: \"2d92d376-9d6c-404a-b1f5-a13a67276b6f\") " pod="openstack/keystone-bootstrap-jzn5h" Dec 13 06:49:30 crc kubenswrapper[5048]: I1213 06:49:30.652993 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/2d92d376-9d6c-404a-b1f5-a13a67276b6f-credential-keys\") pod \"keystone-bootstrap-jzn5h\" (UID: \"2d92d376-9d6c-404a-b1f5-a13a67276b6f\") " pod="openstack/keystone-bootstrap-jzn5h" Dec 13 06:49:30 crc kubenswrapper[5048]: I1213 06:49:30.653039 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d92d376-9d6c-404a-b1f5-a13a67276b6f-combined-ca-bundle\") pod \"keystone-bootstrap-jzn5h\" (UID: \"2d92d376-9d6c-404a-b1f5-a13a67276b6f\") " pod="openstack/keystone-bootstrap-jzn5h" Dec 13 06:49:30 crc kubenswrapper[5048]: I1213 06:49:30.653119 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kshmm\" (UniqueName: \"kubernetes.io/projected/2d92d376-9d6c-404a-b1f5-a13a67276b6f-kube-api-access-kshmm\") pod \"keystone-bootstrap-jzn5h\" (UID: \"2d92d376-9d6c-404a-b1f5-a13a67276b6f\") " pod="openstack/keystone-bootstrap-jzn5h" Dec 13 06:49:30 crc kubenswrapper[5048]: I1213 06:49:30.653166 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2d92d376-9d6c-404a-b1f5-a13a67276b6f-scripts\") pod \"keystone-bootstrap-jzn5h\" (UID: \"2d92d376-9d6c-404a-b1f5-a13a67276b6f\") " pod="openstack/keystone-bootstrap-jzn5h" Dec 13 06:49:30 crc kubenswrapper[5048]: I1213 06:49:30.754894 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2d92d376-9d6c-404a-b1f5-a13a67276b6f-scripts\") pod \"keystone-bootstrap-jzn5h\" (UID: \"2d92d376-9d6c-404a-b1f5-a13a67276b6f\") " pod="openstack/keystone-bootstrap-jzn5h" Dec 13 06:49:30 crc kubenswrapper[5048]: I1213 06:49:30.755036 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d92d376-9d6c-404a-b1f5-a13a67276b6f-config-data\") pod \"keystone-bootstrap-jzn5h\" (UID: \"2d92d376-9d6c-404a-b1f5-a13a67276b6f\") " pod="openstack/keystone-bootstrap-jzn5h" Dec 13 06:49:30 crc kubenswrapper[5048]: I1213 06:49:30.755078 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/2d92d376-9d6c-404a-b1f5-a13a67276b6f-fernet-keys\") pod \"keystone-bootstrap-jzn5h\" (UID: \"2d92d376-9d6c-404a-b1f5-a13a67276b6f\") " pod="openstack/keystone-bootstrap-jzn5h" Dec 13 06:49:30 crc kubenswrapper[5048]: I1213 06:49:30.755213 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/2d92d376-9d6c-404a-b1f5-a13a67276b6f-credential-keys\") pod \"keystone-bootstrap-jzn5h\" (UID: \"2d92d376-9d6c-404a-b1f5-a13a67276b6f\") " pod="openstack/keystone-bootstrap-jzn5h" Dec 13 06:49:30 crc kubenswrapper[5048]: I1213 06:49:30.755240 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d92d376-9d6c-404a-b1f5-a13a67276b6f-combined-ca-bundle\") pod 
\"keystone-bootstrap-jzn5h\" (UID: \"2d92d376-9d6c-404a-b1f5-a13a67276b6f\") " pod="openstack/keystone-bootstrap-jzn5h" Dec 13 06:49:30 crc kubenswrapper[5048]: I1213 06:49:30.755305 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kshmm\" (UniqueName: \"kubernetes.io/projected/2d92d376-9d6c-404a-b1f5-a13a67276b6f-kube-api-access-kshmm\") pod \"keystone-bootstrap-jzn5h\" (UID: \"2d92d376-9d6c-404a-b1f5-a13a67276b6f\") " pod="openstack/keystone-bootstrap-jzn5h" Dec 13 06:49:30 crc kubenswrapper[5048]: I1213 06:49:30.761540 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2d92d376-9d6c-404a-b1f5-a13a67276b6f-scripts\") pod \"keystone-bootstrap-jzn5h\" (UID: \"2d92d376-9d6c-404a-b1f5-a13a67276b6f\") " pod="openstack/keystone-bootstrap-jzn5h" Dec 13 06:49:30 crc kubenswrapper[5048]: I1213 06:49:30.761658 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/2d92d376-9d6c-404a-b1f5-a13a67276b6f-credential-keys\") pod \"keystone-bootstrap-jzn5h\" (UID: \"2d92d376-9d6c-404a-b1f5-a13a67276b6f\") " pod="openstack/keystone-bootstrap-jzn5h" Dec 13 06:49:30 crc kubenswrapper[5048]: I1213 06:49:30.761900 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d92d376-9d6c-404a-b1f5-a13a67276b6f-combined-ca-bundle\") pod \"keystone-bootstrap-jzn5h\" (UID: \"2d92d376-9d6c-404a-b1f5-a13a67276b6f\") " pod="openstack/keystone-bootstrap-jzn5h" Dec 13 06:49:30 crc kubenswrapper[5048]: I1213 06:49:30.762538 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d92d376-9d6c-404a-b1f5-a13a67276b6f-config-data\") pod \"keystone-bootstrap-jzn5h\" (UID: \"2d92d376-9d6c-404a-b1f5-a13a67276b6f\") " pod="openstack/keystone-bootstrap-jzn5h" Dec 13 06:49:30 crc kubenswrapper[5048]: I1213 06:49:30.763392 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/2d92d376-9d6c-404a-b1f5-a13a67276b6f-fernet-keys\") pod \"keystone-bootstrap-jzn5h\" (UID: \"2d92d376-9d6c-404a-b1f5-a13a67276b6f\") " pod="openstack/keystone-bootstrap-jzn5h" Dec 13 06:49:30 crc kubenswrapper[5048]: I1213 06:49:30.774151 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kshmm\" (UniqueName: \"kubernetes.io/projected/2d92d376-9d6c-404a-b1f5-a13a67276b6f-kube-api-access-kshmm\") pod \"keystone-bootstrap-jzn5h\" (UID: \"2d92d376-9d6c-404a-b1f5-a13a67276b6f\") " pod="openstack/keystone-bootstrap-jzn5h" Dec 13 06:49:30 crc kubenswrapper[5048]: I1213 06:49:30.800142 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-jzn5h" Dec 13 06:49:35 crc kubenswrapper[5048]: E1213 06:49:35.048780 5048 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Dec 13 06:49:35 crc kubenswrapper[5048]: E1213 06:49:35.049365 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F /var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nc5h5d7h57bh54ch5dfh78h7ch557h56ch5cbh584h5f7h64dh54h56chcch57ch57fh5b4h545h676h597h6h5bdh5b9h575h557h687h54chdch5b8h559q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fnwdm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-769d668ff7-qx7l7_openstack(2476d7cd-fb97-45d1-a67d-8fb867d8f296): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 13 06:49:35 crc kubenswrapper[5048]: E1213 06:49:35.051984 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-769d668ff7-qx7l7" podUID="2476d7cd-fb97-45d1-a67d-8fb867d8f296" Dec 13 06:49:35 crc kubenswrapper[5048]: E1213 06:49:35.078342 5048 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Dec 13 06:49:35 crc kubenswrapper[5048]: E1213 06:49:35.078517 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F /var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n695h67dhf6h545hb4hcbh65ch66ch58dh689hbfh686h576hb4h5bch687h645h65fhcfh64dh67h89h5d6h658h5ddhd8h54dh6bh5dfh95h655h66fq,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-t8k2m,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-85745844cf-bqdb4_openstack(00a24942-715c-4fbd-bb97-1128504ef182): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 13 06:49:35 crc kubenswrapper[5048]: E1213 06:49:35.080584 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-85745844cf-bqdb4" podUID="00a24942-715c-4fbd-bb97-1128504ef182" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.128338 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.246772 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/33ee9f9c-b391-48fe-80f1-7d078b8e7b5b-logs\") pod \"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b\" (UID: \"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b\") " Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.246858 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33ee9f9c-b391-48fe-80f1-7d078b8e7b5b-scripts\") pod \"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b\" (UID: \"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b\") " Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.246952 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/33ee9f9c-b391-48fe-80f1-7d078b8e7b5b-httpd-run\") pod \"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b\" (UID: \"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b\") " Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.247023 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33ee9f9c-b391-48fe-80f1-7d078b8e7b5b-config-data\") pod \"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b\" (UID: \"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b\") " Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.247054 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p6kr6\" (UniqueName: \"kubernetes.io/projected/33ee9f9c-b391-48fe-80f1-7d078b8e7b5b-kube-api-access-p6kr6\") pod \"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b\" (UID: \"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b\") " Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.247099 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33ee9f9c-b391-48fe-80f1-7d078b8e7b5b-combined-ca-bundle\") pod \"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b\" (UID: \"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b\") " Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.247177 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b\" (UID: \"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b\") " Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.250032 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/33ee9f9c-b391-48fe-80f1-7d078b8e7b5b-logs" (OuterVolumeSpecName: "logs") pod "33ee9f9c-b391-48fe-80f1-7d078b8e7b5b" (UID: "33ee9f9c-b391-48fe-80f1-7d078b8e7b5b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.250278 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/33ee9f9c-b391-48fe-80f1-7d078b8e7b5b-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "33ee9f9c-b391-48fe-80f1-7d078b8e7b5b" (UID: "33ee9f9c-b391-48fe-80f1-7d078b8e7b5b"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.255682 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "33ee9f9c-b391-48fe-80f1-7d078b8e7b5b" (UID: "33ee9f9c-b391-48fe-80f1-7d078b8e7b5b"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.258195 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33ee9f9c-b391-48fe-80f1-7d078b8e7b5b-scripts" (OuterVolumeSpecName: "scripts") pod "33ee9f9c-b391-48fe-80f1-7d078b8e7b5b" (UID: "33ee9f9c-b391-48fe-80f1-7d078b8e7b5b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.264539 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33ee9f9c-b391-48fe-80f1-7d078b8e7b5b-kube-api-access-p6kr6" (OuterVolumeSpecName: "kube-api-access-p6kr6") pod "33ee9f9c-b391-48fe-80f1-7d078b8e7b5b" (UID: "33ee9f9c-b391-48fe-80f1-7d078b8e7b5b"). InnerVolumeSpecName "kube-api-access-p6kr6". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.281889 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33ee9f9c-b391-48fe-80f1-7d078b8e7b5b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "33ee9f9c-b391-48fe-80f1-7d078b8e7b5b" (UID: "33ee9f9c-b391-48fe-80f1-7d078b8e7b5b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.302865 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33ee9f9c-b391-48fe-80f1-7d078b8e7b5b-config-data" (OuterVolumeSpecName: "config-data") pod "33ee9f9c-b391-48fe-80f1-7d078b8e7b5b" (UID: "33ee9f9c-b391-48fe-80f1-7d078b8e7b5b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.350561 5048 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/33ee9f9c-b391-48fe-80f1-7d078b8e7b5b-logs\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.350607 5048 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33ee9f9c-b391-48fe-80f1-7d078b8e7b5b-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.350619 5048 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/33ee9f9c-b391-48fe-80f1-7d078b8e7b5b-httpd-run\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.350632 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33ee9f9c-b391-48fe-80f1-7d078b8e7b5b-config-data\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.350644 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p6kr6\" (UniqueName: \"kubernetes.io/projected/33ee9f9c-b391-48fe-80f1-7d078b8e7b5b-kube-api-access-p6kr6\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.350658 5048 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33ee9f9c-b391-48fe-80f1-7d078b8e7b5b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.350698 5048 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.370163 5048 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.451803 5048 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.537466 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5c79d794d7-2kjcf" podUID="b53097b1-3126-4e17-b910-d2b7f57ec87e" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.123:5353: connect: connection refused" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.711427 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.720611 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"33ee9f9c-b391-48fe-80f1-7d078b8e7b5b","Type":"ContainerDied","Data":"b2f4380a8a428da29e4aee9bb5c44bcf5b871f6c9a9d5f94d7de270585d127c2"} Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.720687 5048 scope.go:117] "RemoveContainer" containerID="7c2b47110a25f9e5409299bde9b79667ce0a70c556df346e5228288cc3d906cc" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.766544 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.774300 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.801251 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Dec 13 06:49:35 crc kubenswrapper[5048]: E1213 06:49:35.801728 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33ee9f9c-b391-48fe-80f1-7d078b8e7b5b" containerName="glance-log" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.801749 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="33ee9f9c-b391-48fe-80f1-7d078b8e7b5b" containerName="glance-log" Dec 13 06:49:35 crc kubenswrapper[5048]: E1213 06:49:35.801765 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33ee9f9c-b391-48fe-80f1-7d078b8e7b5b" containerName="glance-httpd" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.801773 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="33ee9f9c-b391-48fe-80f1-7d078b8e7b5b" containerName="glance-httpd" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.802069 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="33ee9f9c-b391-48fe-80f1-7d078b8e7b5b" containerName="glance-httpd" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.802100 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="33ee9f9c-b391-48fe-80f1-7d078b8e7b5b" containerName="glance-log" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.803202 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.812533 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.812639 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.812660 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.860371 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\") " pod="openstack/glance-default-external-api-0" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.860424 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\") " pod="openstack/glance-default-external-api-0" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.864562 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-logs\") pod \"glance-default-external-api-0\" (UID: \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\") " pod="openstack/glance-default-external-api-0" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.864619 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xd76c\" (UniqueName: \"kubernetes.io/projected/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-kube-api-access-xd76c\") pod \"glance-default-external-api-0\" (UID: \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\") " pod="openstack/glance-default-external-api-0" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.864764 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-config-data\") pod \"glance-default-external-api-0\" (UID: \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\") " pod="openstack/glance-default-external-api-0" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.864842 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-scripts\") pod \"glance-default-external-api-0\" (UID: \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\") " pod="openstack/glance-default-external-api-0" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.864967 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\") " pod="openstack/glance-default-external-api-0" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.865035 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\") " pod="openstack/glance-default-external-api-0" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.967598 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-scripts\") pod \"glance-default-external-api-0\" (UID: \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\") " pod="openstack/glance-default-external-api-0" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.967709 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\") " pod="openstack/glance-default-external-api-0" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.967738 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\") " pod="openstack/glance-default-external-api-0" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.967800 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\") " pod="openstack/glance-default-external-api-0" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.967824 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\") " pod="openstack/glance-default-external-api-0" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.967858 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-logs\") pod \"glance-default-external-api-0\" (UID: \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\") " pod="openstack/glance-default-external-api-0" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.967885 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xd76c\" (UniqueName: \"kubernetes.io/projected/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-kube-api-access-xd76c\") pod \"glance-default-external-api-0\" (UID: \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\") " pod="openstack/glance-default-external-api-0" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.967960 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-config-data\") pod \"glance-default-external-api-0\" (UID: \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\") " pod="openstack/glance-default-external-api-0" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.969730 5048 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") 
pod \"glance-default-external-api-0\" (UID: \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-external-api-0" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.970640 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-logs\") pod \"glance-default-external-api-0\" (UID: \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\") " pod="openstack/glance-default-external-api-0" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.971061 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\") " pod="openstack/glance-default-external-api-0" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.973976 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\") " pod="openstack/glance-default-external-api-0" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.979974 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\") " pod="openstack/glance-default-external-api-0" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.980584 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-scripts\") pod \"glance-default-external-api-0\" (UID: \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\") " pod="openstack/glance-default-external-api-0" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.987754 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-config-data\") pod \"glance-default-external-api-0\" (UID: \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\") " pod="openstack/glance-default-external-api-0" Dec 13 06:49:35 crc kubenswrapper[5048]: I1213 06:49:35.994417 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xd76c\" (UniqueName: \"kubernetes.io/projected/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-kube-api-access-xd76c\") pod \"glance-default-external-api-0\" (UID: \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\") " pod="openstack/glance-default-external-api-0" Dec 13 06:49:36 crc kubenswrapper[5048]: I1213 06:49:36.010328 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\") " pod="openstack/glance-default-external-api-0" Dec 13 06:49:36 crc kubenswrapper[5048]: I1213 06:49:36.135076 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 13 06:49:36 crc kubenswrapper[5048]: I1213 06:49:36.579501 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="33ee9f9c-b391-48fe-80f1-7d078b8e7b5b" path="/var/lib/kubelet/pods/33ee9f9c-b391-48fe-80f1-7d078b8e7b5b/volumes" Dec 13 06:49:37 crc kubenswrapper[5048]: E1213 06:49:37.221635 5048 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Dec 13 06:49:37 crc kubenswrapper[5048]: E1213 06:49:37.221788 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F /var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5dch88h8fh5b7hf4h5ffhb8h597h5ffh5dch5b5h5b8h5d4h56dh9h599h5b9hbfh7dh57h77h6bh648h696h547h659h5b7h646h5c9h68fh5cdh669q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pxjxq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-8c8f677f9-25477_openstack(9e51f251-b4ed-4920-b015-e9ac75c618b2): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 13 06:49:37 crc kubenswrapper[5048]: E1213 06:49:37.224066 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-8c8f677f9-25477" podUID="9e51f251-b4ed-4920-b015-e9ac75c618b2" Dec 13 06:49:45 crc kubenswrapper[5048]: I1213 06:49:45.537159 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5c79d794d7-2kjcf" podUID="b53097b1-3126-4e17-b910-d2b7f57ec87e" 
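containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.123:5353: i/o timeout"

The readiness failure just logged is the kubelet dialing the dnsmasq pod IP on port 5353 and timing out. A probe of roughly the following shape would produce that "dial tcp ... i/o timeout" output; this is a minimal Go sketch using the upstream k8s.io/api types, with the port taken from the log and the probe type inferred from the TCP-dial output. The timing fields are placeholders, not the OpenStack operator's actual settings.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// Readiness probe sketch for the dnsmasq-dns container. TCPSocket
// probes fail with "dial tcp <podIP>:<port>: i/o timeout" when the
// connection cannot be established within the timeout, matching the
// output logged above. Port 5353 is from the log; the timing values
// below are assumptions.
var dnsmasqReadiness = corev1.Probe{
	ProbeHandler: corev1.ProbeHandler{
		TCPSocket: &corev1.TCPSocketAction{Port: intstr.FromInt(5353)},
	},
	PeriodSeconds:    10, // assumed
	TimeoutSeconds:   5,  // assumed
	FailureThreshold: 3,  // assumed
}

func main() { fmt.Printf("%+v\n", dnsmasqReadiness) }
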
containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.123:5353: i/o timeout" Dec 13 06:49:45 crc kubenswrapper[5048]: I1213 06:49:45.538759 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c79d794d7-2kjcf" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.044180 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-769d668ff7-qx7l7" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.052590 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-85745844cf-bqdb4" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.175714 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/00a24942-715c-4fbd-bb97-1128504ef182-horizon-secret-key\") pod \"00a24942-715c-4fbd-bb97-1128504ef182\" (UID: \"00a24942-715c-4fbd-bb97-1128504ef182\") " Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.175882 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/2476d7cd-fb97-45d1-a67d-8fb867d8f296-horizon-secret-key\") pod \"2476d7cd-fb97-45d1-a67d-8fb867d8f296\" (UID: \"2476d7cd-fb97-45d1-a67d-8fb867d8f296\") " Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.175922 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2476d7cd-fb97-45d1-a67d-8fb867d8f296-config-data\") pod \"2476d7cd-fb97-45d1-a67d-8fb867d8f296\" (UID: \"2476d7cd-fb97-45d1-a67d-8fb867d8f296\") " Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.175938 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/00a24942-715c-4fbd-bb97-1128504ef182-config-data\") pod \"00a24942-715c-4fbd-bb97-1128504ef182\" (UID: \"00a24942-715c-4fbd-bb97-1128504ef182\") " Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.175960 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/00a24942-715c-4fbd-bb97-1128504ef182-logs\") pod \"00a24942-715c-4fbd-bb97-1128504ef182\" (UID: \"00a24942-715c-4fbd-bb97-1128504ef182\") " Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.175983 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2476d7cd-fb97-45d1-a67d-8fb867d8f296-scripts\") pod \"2476d7cd-fb97-45d1-a67d-8fb867d8f296\" (UID: \"2476d7cd-fb97-45d1-a67d-8fb867d8f296\") " Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.176008 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fnwdm\" (UniqueName: \"kubernetes.io/projected/2476d7cd-fb97-45d1-a67d-8fb867d8f296-kube-api-access-fnwdm\") pod \"2476d7cd-fb97-45d1-a67d-8fb867d8f296\" (UID: \"2476d7cd-fb97-45d1-a67d-8fb867d8f296\") " Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.176053 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/00a24942-715c-4fbd-bb97-1128504ef182-scripts\") pod \"00a24942-715c-4fbd-bb97-1128504ef182\" (UID: \"00a24942-715c-4fbd-bb97-1128504ef182\") " Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.176071 5048 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-t8k2m\" (UniqueName: \"kubernetes.io/projected/00a24942-715c-4fbd-bb97-1128504ef182-kube-api-access-t8k2m\") pod \"00a24942-715c-4fbd-bb97-1128504ef182\" (UID: \"00a24942-715c-4fbd-bb97-1128504ef182\") " Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.176094 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2476d7cd-fb97-45d1-a67d-8fb867d8f296-logs\") pod \"2476d7cd-fb97-45d1-a67d-8fb867d8f296\" (UID: \"2476d7cd-fb97-45d1-a67d-8fb867d8f296\") " Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.176531 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2476d7cd-fb97-45d1-a67d-8fb867d8f296-config-data" (OuterVolumeSpecName: "config-data") pod "2476d7cd-fb97-45d1-a67d-8fb867d8f296" (UID: "2476d7cd-fb97-45d1-a67d-8fb867d8f296"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.176724 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2476d7cd-fb97-45d1-a67d-8fb867d8f296-logs" (OuterVolumeSpecName: "logs") pod "2476d7cd-fb97-45d1-a67d-8fb867d8f296" (UID: "2476d7cd-fb97-45d1-a67d-8fb867d8f296"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.177044 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2476d7cd-fb97-45d1-a67d-8fb867d8f296-scripts" (OuterVolumeSpecName: "scripts") pod "2476d7cd-fb97-45d1-a67d-8fb867d8f296" (UID: "2476d7cd-fb97-45d1-a67d-8fb867d8f296"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.177413 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00a24942-715c-4fbd-bb97-1128504ef182-logs" (OuterVolumeSpecName: "logs") pod "00a24942-715c-4fbd-bb97-1128504ef182" (UID: "00a24942-715c-4fbd-bb97-1128504ef182"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.177516 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/00a24942-715c-4fbd-bb97-1128504ef182-config-data" (OuterVolumeSpecName: "config-data") pod "00a24942-715c-4fbd-bb97-1128504ef182" (UID: "00a24942-715c-4fbd-bb97-1128504ef182"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.177961 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/00a24942-715c-4fbd-bb97-1128504ef182-scripts" (OuterVolumeSpecName: "scripts") pod "00a24942-715c-4fbd-bb97-1128504ef182" (UID: "00a24942-715c-4fbd-bb97-1128504ef182"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.180644 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2476d7cd-fb97-45d1-a67d-8fb867d8f296-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "2476d7cd-fb97-45d1-a67d-8fb867d8f296" (UID: "2476d7cd-fb97-45d1-a67d-8fb867d8f296"). InnerVolumeSpecName "horizon-secret-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.181633 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00a24942-715c-4fbd-bb97-1128504ef182-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "00a24942-715c-4fbd-bb97-1128504ef182" (UID: "00a24942-715c-4fbd-bb97-1128504ef182"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.181857 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00a24942-715c-4fbd-bb97-1128504ef182-kube-api-access-t8k2m" (OuterVolumeSpecName: "kube-api-access-t8k2m") pod "00a24942-715c-4fbd-bb97-1128504ef182" (UID: "00a24942-715c-4fbd-bb97-1128504ef182"). InnerVolumeSpecName "kube-api-access-t8k2m". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.183123 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2476d7cd-fb97-45d1-a67d-8fb867d8f296-kube-api-access-fnwdm" (OuterVolumeSpecName: "kube-api-access-fnwdm") pod "2476d7cd-fb97-45d1-a67d-8fb867d8f296" (UID: "2476d7cd-fb97-45d1-a67d-8fb867d8f296"). InnerVolumeSpecName "kube-api-access-fnwdm". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.220908 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.221282 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.221338 5048 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.222178 5048 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3dc134f93584bad71d64c1b4cfe7bce9b820cc019159e00fde72b17e966595a4"} pod="openshift-machine-config-operator/machine-config-daemon-j7hns" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.222251 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" containerID="cri-o://3dc134f93584bad71d64c1b4cfe7bce9b820cc019159e00fde72b17e966595a4" gracePeriod=600 Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.277754 5048 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/2476d7cd-fb97-45d1-a67d-8fb867d8f296-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.277797 5048 
reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2476d7cd-fb97-45d1-a67d-8fb867d8f296-config-data\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.277809 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/00a24942-715c-4fbd-bb97-1128504ef182-config-data\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.277821 5048 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/00a24942-715c-4fbd-bb97-1128504ef182-logs\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.277831 5048 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2476d7cd-fb97-45d1-a67d-8fb867d8f296-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.277882 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fnwdm\" (UniqueName: \"kubernetes.io/projected/2476d7cd-fb97-45d1-a67d-8fb867d8f296-kube-api-access-fnwdm\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.277895 5048 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/00a24942-715c-4fbd-bb97-1128504ef182-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.277903 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t8k2m\" (UniqueName: \"kubernetes.io/projected/00a24942-715c-4fbd-bb97-1128504ef182-kube-api-access-t8k2m\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.277911 5048 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2476d7cd-fb97-45d1-a67d-8fb867d8f296-logs\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.277919 5048 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/00a24942-715c-4fbd-bb97-1128504ef182-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:46 crc kubenswrapper[5048]: E1213 06:49:46.501830 5048 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified" Dec 13 06:49:46 crc kubenswrapper[5048]: E1213 06:49:46.501978 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db 
upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-677wd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-pzgrc_openstack(af6830b6-96f2-487f-ba02-93c7f01d0ceb): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 13 06:49:46 crc kubenswrapper[5048]: E1213 06:49:46.503545 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-pzgrc" podUID="af6830b6-96f2-487f-ba02-93c7f01d0ceb" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.593562 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-2kjcf" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.602754 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-8c8f677f9-25477" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.643917 5048 util.go:48] "No ready sandbox for pod can be found. 
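Need to start a new one" pod="openstack/glance-default-internal-api-0"

The &Container{...} text in the ErrImagePull error above is the kubelet's Go serialization of the barbican-db-sync container spec. Transcribed back into source form with the same k8s.io/api types, keeping only the fields that are set in the dump (values are exactly as logged; the auto-injected kube-api-access-677wd service-account mount is omitted):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func boolPtr(b bool) *bool    { return &b }
func int64Ptr(i int64) *int64 { return &i }

// barbican-db-sync, transcribed field-for-field from the &Container{...}
// dump in the ErrImagePull error above. The image pull was canceled while
// copying the image config, so this container never started.
var barbicanDBSync = corev1.Container{
	Name:    "barbican-db-sync",
	Image:   "quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified",
	Command: []string{"/bin/bash"},
	Args:    []string{"-c", "barbican-manage db upgrade"},
	Env: []corev1.EnvVar{
		{Name: "KOLLA_BOOTSTRAP", Value: "TRUE"},
		{Name: "KOLLA_CONFIG_STRATEGY", Value: "COPY_ALWAYS"},
	},
	VolumeMounts: []corev1.VolumeMount{
		{Name: "db-sync-config-data", ReadOnly: true, MountPath: "/etc/barbican/barbican.conf.d"},
		{Name: "combined-ca-bundle", ReadOnly: true,
			MountPath: "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem",
			SubPath:   "tls-ca-bundle.pem"},
	},
	TerminationMessagePath:   "/dev/termination-log",
	TerminationMessagePolicy: corev1.TerminationMessageReadFile,
	ImagePullPolicy:          corev1.PullIfNotPresent,
	SecurityContext: &corev1.SecurityContext{
		Capabilities: &corev1.Capabilities{Drop: []corev1.Capability{"MKNOD"}},
		RunAsUser:    int64Ptr(42403),
		RunAsGroup:   int64Ptr(42403),
		RunAsNonRoot: boolPtr(true),
	},
}

func main() { fmt.Println(barbicanDBSync.Name, barbicanDBSync.Image) }
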
Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.686032 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v4mfw\" (UniqueName: \"kubernetes.io/projected/b53097b1-3126-4e17-b910-d2b7f57ec87e-kube-api-access-v4mfw\") pod \"b53097b1-3126-4e17-b910-d2b7f57ec87e\" (UID: \"b53097b1-3126-4e17-b910-d2b7f57ec87e\") "
Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.686082 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/9e51f251-b4ed-4920-b015-e9ac75c618b2-horizon-secret-key\") pod \"9e51f251-b4ed-4920-b015-e9ac75c618b2\" (UID: \"9e51f251-b4ed-4920-b015-e9ac75c618b2\") "
Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.686129 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b53097b1-3126-4e17-b910-d2b7f57ec87e-dns-swift-storage-0\") pod \"b53097b1-3126-4e17-b910-d2b7f57ec87e\" (UID: \"b53097b1-3126-4e17-b910-d2b7f57ec87e\") "
Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.686237 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9e51f251-b4ed-4920-b015-e9ac75c618b2-scripts\") pod \"9e51f251-b4ed-4920-b015-e9ac75c618b2\" (UID: \"9e51f251-b4ed-4920-b015-e9ac75c618b2\") "
Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.686263 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b53097b1-3126-4e17-b910-d2b7f57ec87e-dns-svc\") pod \"b53097b1-3126-4e17-b910-d2b7f57ec87e\" (UID: \"b53097b1-3126-4e17-b910-d2b7f57ec87e\") "
Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.686287 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pxjxq\" (UniqueName: \"kubernetes.io/projected/9e51f251-b4ed-4920-b015-e9ac75c618b2-kube-api-access-pxjxq\") pod \"9e51f251-b4ed-4920-b015-e9ac75c618b2\" (UID: \"9e51f251-b4ed-4920-b015-e9ac75c618b2\") "
Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.686306 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9e51f251-b4ed-4920-b015-e9ac75c618b2-config-data\") pod \"9e51f251-b4ed-4920-b015-e9ac75c618b2\" (UID: \"9e51f251-b4ed-4920-b015-e9ac75c618b2\") "
Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.686337 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9e51f251-b4ed-4920-b015-e9ac75c618b2-logs\") pod \"9e51f251-b4ed-4920-b015-e9ac75c618b2\" (UID: \"9e51f251-b4ed-4920-b015-e9ac75c618b2\") "
Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.686356 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b53097b1-3126-4e17-b910-d2b7f57ec87e-config\") pod \"b53097b1-3126-4e17-b910-d2b7f57ec87e\" (UID: \"b53097b1-3126-4e17-b910-d2b7f57ec87e\") "
Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.686400 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b53097b1-3126-4e17-b910-d2b7f57ec87e-ovsdbserver-nb\") pod \"b53097b1-3126-4e17-b910-d2b7f57ec87e\" (UID:
\"b53097b1-3126-4e17-b910-d2b7f57ec87e\") " Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.686447 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b53097b1-3126-4e17-b910-d2b7f57ec87e-ovsdbserver-sb\") pod \"b53097b1-3126-4e17-b910-d2b7f57ec87e\" (UID: \"b53097b1-3126-4e17-b910-d2b7f57ec87e\") " Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.687192 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9e51f251-b4ed-4920-b015-e9ac75c618b2-config-data" (OuterVolumeSpecName: "config-data") pod "9e51f251-b4ed-4920-b015-e9ac75c618b2" (UID: "9e51f251-b4ed-4920-b015-e9ac75c618b2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.687579 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9e51f251-b4ed-4920-b015-e9ac75c618b2-logs" (OuterVolumeSpecName: "logs") pod "9e51f251-b4ed-4920-b015-e9ac75c618b2" (UID: "9e51f251-b4ed-4920-b015-e9ac75c618b2"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.688061 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9e51f251-b4ed-4920-b015-e9ac75c618b2-scripts" (OuterVolumeSpecName: "scripts") pod "9e51f251-b4ed-4920-b015-e9ac75c618b2" (UID: "9e51f251-b4ed-4920-b015-e9ac75c618b2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.690575 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e51f251-b4ed-4920-b015-e9ac75c618b2-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "9e51f251-b4ed-4920-b015-e9ac75c618b2" (UID: "9e51f251-b4ed-4920-b015-e9ac75c618b2"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.691663 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e51f251-b4ed-4920-b015-e9ac75c618b2-kube-api-access-pxjxq" (OuterVolumeSpecName: "kube-api-access-pxjxq") pod "9e51f251-b4ed-4920-b015-e9ac75c618b2" (UID: "9e51f251-b4ed-4920-b015-e9ac75c618b2"). InnerVolumeSpecName "kube-api-access-pxjxq". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.694952 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b53097b1-3126-4e17-b910-d2b7f57ec87e-kube-api-access-v4mfw" (OuterVolumeSpecName: "kube-api-access-v4mfw") pod "b53097b1-3126-4e17-b910-d2b7f57ec87e" (UID: "b53097b1-3126-4e17-b910-d2b7f57ec87e"). InnerVolumeSpecName "kube-api-access-v4mfw". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.734530 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b53097b1-3126-4e17-b910-d2b7f57ec87e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b53097b1-3126-4e17-b910-d2b7f57ec87e" (UID: "b53097b1-3126-4e17-b910-d2b7f57ec87e"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.737632 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b53097b1-3126-4e17-b910-d2b7f57ec87e-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "b53097b1-3126-4e17-b910-d2b7f57ec87e" (UID: "b53097b1-3126-4e17-b910-d2b7f57ec87e"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.741633 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b53097b1-3126-4e17-b910-d2b7f57ec87e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b53097b1-3126-4e17-b910-d2b7f57ec87e" (UID: "b53097b1-3126-4e17-b910-d2b7f57ec87e"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.747028 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b53097b1-3126-4e17-b910-d2b7f57ec87e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b53097b1-3126-4e17-b910-d2b7f57ec87e" (UID: "b53097b1-3126-4e17-b910-d2b7f57ec87e"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.747123 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b53097b1-3126-4e17-b910-d2b7f57ec87e-config" (OuterVolumeSpecName: "config") pod "b53097b1-3126-4e17-b910-d2b7f57ec87e" (UID: "b53097b1-3126-4e17-b910-d2b7f57ec87e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.788191 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/468a6ac0-0a50-4f93-b213-0175c8b756c8-combined-ca-bundle\") pod \"468a6ac0-0a50-4f93-b213-0175c8b756c8\" (UID: \"468a6ac0-0a50-4f93-b213-0175c8b756c8\") " Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.788801 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/468a6ac0-0a50-4f93-b213-0175c8b756c8-config-data\") pod \"468a6ac0-0a50-4f93-b213-0175c8b756c8\" (UID: \"468a6ac0-0a50-4f93-b213-0175c8b756c8\") " Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.789918 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/468a6ac0-0a50-4f93-b213-0175c8b756c8-httpd-run\") pod \"468a6ac0-0a50-4f93-b213-0175c8b756c8\" (UID: \"468a6ac0-0a50-4f93-b213-0175c8b756c8\") " Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.790152 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/468a6ac0-0a50-4f93-b213-0175c8b756c8-scripts\") pod \"468a6ac0-0a50-4f93-b213-0175c8b756c8\" (UID: \"468a6ac0-0a50-4f93-b213-0175c8b756c8\") " Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.790203 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-74qtl\" (UniqueName: \"kubernetes.io/projected/468a6ac0-0a50-4f93-b213-0175c8b756c8-kube-api-access-74qtl\") pod \"468a6ac0-0a50-4f93-b213-0175c8b756c8\" (UID: \"468a6ac0-0a50-4f93-b213-0175c8b756c8\") " Dec 13 
06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.790269 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/468a6ac0-0a50-4f93-b213-0175c8b756c8-logs\") pod \"468a6ac0-0a50-4f93-b213-0175c8b756c8\" (UID: \"468a6ac0-0a50-4f93-b213-0175c8b756c8\") " Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.790302 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"468a6ac0-0a50-4f93-b213-0175c8b756c8\" (UID: \"468a6ac0-0a50-4f93-b213-0175c8b756c8\") " Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.791327 5048 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b53097b1-3126-4e17-b910-d2b7f57ec87e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.791360 5048 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b53097b1-3126-4e17-b910-d2b7f57ec87e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.791378 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v4mfw\" (UniqueName: \"kubernetes.io/projected/b53097b1-3126-4e17-b910-d2b7f57ec87e-kube-api-access-v4mfw\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.791397 5048 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/9e51f251-b4ed-4920-b015-e9ac75c618b2-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.791410 5048 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b53097b1-3126-4e17-b910-d2b7f57ec87e-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.791422 5048 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9e51f251-b4ed-4920-b015-e9ac75c618b2-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.791455 5048 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b53097b1-3126-4e17-b910-d2b7f57ec87e-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.791467 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pxjxq\" (UniqueName: \"kubernetes.io/projected/9e51f251-b4ed-4920-b015-e9ac75c618b2-kube-api-access-pxjxq\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.791478 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9e51f251-b4ed-4920-b015-e9ac75c618b2-config-data\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.791493 5048 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9e51f251-b4ed-4920-b015-e9ac75c618b2-logs\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.791505 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b53097b1-3126-4e17-b910-d2b7f57ec87e-config\") on node \"crc\" 
DevicePath \"\"" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.791342 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/468a6ac0-0a50-4f93-b213-0175c8b756c8-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "468a6ac0-0a50-4f93-b213-0175c8b756c8" (UID: "468a6ac0-0a50-4f93-b213-0175c8b756c8"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.792200 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/468a6ac0-0a50-4f93-b213-0175c8b756c8-logs" (OuterVolumeSpecName: "logs") pod "468a6ac0-0a50-4f93-b213-0175c8b756c8" (UID: "468a6ac0-0a50-4f93-b213-0175c8b756c8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.793616 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/468a6ac0-0a50-4f93-b213-0175c8b756c8-scripts" (OuterVolumeSpecName: "scripts") pod "468a6ac0-0a50-4f93-b213-0175c8b756c8" (UID: "468a6ac0-0a50-4f93-b213-0175c8b756c8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.794764 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/468a6ac0-0a50-4f93-b213-0175c8b756c8-kube-api-access-74qtl" (OuterVolumeSpecName: "kube-api-access-74qtl") pod "468a6ac0-0a50-4f93-b213-0175c8b756c8" (UID: "468a6ac0-0a50-4f93-b213-0175c8b756c8"). InnerVolumeSpecName "kube-api-access-74qtl". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.796010 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "glance") pod "468a6ac0-0a50-4f93-b213-0175c8b756c8" (UID: "468a6ac0-0a50-4f93-b213-0175c8b756c8"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.816022 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/468a6ac0-0a50-4f93-b213-0175c8b756c8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "468a6ac0-0a50-4f93-b213-0175c8b756c8" (UID: "468a6ac0-0a50-4f93-b213-0175c8b756c8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.816329 5048 util.go:48] "No ready sandbox for pod can be found. 
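Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-2kjcf"

The glance teardown above releases local-storage06-crc, the kubernetes.io/local-volume PV backing the pod's "glance" volume; the MountDevice line further below shows its backing path, /mnt/openstack/pv06. A hand-written Go sketch of what such a local PV looks like: the name, node, and path come from the log, while capacity, access mode, and storage class are guesses.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// local-storage06-crc as a local PersistentVolume. Local PVs require a
// node affinity pinning them to the host that owns the backing path;
// the node name "crc" and the path are from the log.
var localStorage06 = corev1.PersistentVolume{
	ObjectMeta: metav1.ObjectMeta{Name: "local-storage06-crc"},
	Spec: corev1.PersistentVolumeSpec{
		Capacity: corev1.ResourceList{
			corev1.ResourceStorage: resource.MustParse("10Gi"), // assumed
		},
		AccessModes:      []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, // assumed
		StorageClassName: "local-storage",                                           // assumed
		PersistentVolumeSource: corev1.PersistentVolumeSource{
			Local: &corev1.LocalVolumeSource{Path: "/mnt/openstack/pv06"},
		},
		NodeAffinity: &corev1.VolumeNodeAffinity{
			Required: &corev1.NodeSelector{
				NodeSelectorTerms: []corev1.NodeSelectorTerm{{
					MatchExpressions: []corev1.NodeSelectorRequirement{{
						Key:      "kubernetes.io/hostname",
						Operator: corev1.NodeSelectorOpIn,
						Values:   []string{"crc"},
					}},
				}},
			},
		},
	},
}

func main() { fmt.Println(localStorage06.Name) }
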
Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.816330 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-2kjcf" event={"ID":"b53097b1-3126-4e17-b910-d2b7f57ec87e","Type":"ContainerDied","Data":"ff272d80ea1548840dad8361734bc6acbe33d378be633a438baedc9a06e7def9"}
Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.819543 5048 generic.go:334] "Generic (PLEG): container finished" podID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerID="3dc134f93584bad71d64c1b4cfe7bce9b820cc019159e00fde72b17e966595a4" exitCode=0
Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.819623 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" event={"ID":"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b","Type":"ContainerDied","Data":"3dc134f93584bad71d64c1b4cfe7bce9b820cc019159e00fde72b17e966595a4"}
Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.821504 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-769d668ff7-qx7l7"
Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.821534 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-769d668ff7-qx7l7" event={"ID":"2476d7cd-fb97-45d1-a67d-8fb867d8f296","Type":"ContainerDied","Data":"50e4f8d8be68038582635f116ea7bdf898c8cdceadd414b6df838274e8359d19"}
Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.826255 5048 generic.go:334] "Generic (PLEG): container finished" podID="34ff9369-df92-416e-a391-18b362cd491f" containerID="91f972f3ba413690e89f2f05548d1680b8d74a87443549898fe18c686de2c727" exitCode=0
Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.826324 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-7ksgw" event={"ID":"34ff9369-df92-416e-a391-18b362cd491f","Type":"ContainerDied","Data":"91f972f3ba413690e89f2f05548d1680b8d74a87443549898fe18c686de2c727"}
Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.829541 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"468a6ac0-0a50-4f93-b213-0175c8b756c8","Type":"ContainerDied","Data":"b0408d10c356c3e6e3e162d8e4f14f2f2066bd2af538fbeeb7b8b6296fe1ea3a"}
Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.829644 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.832357 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-85745844cf-bqdb4" event={"ID":"00a24942-715c-4fbd-bb97-1128504ef182","Type":"ContainerDied","Data":"5ebd77429989d64f58f346bd3daf343dd4956e5b232f913cfd25efb52932a358"}
Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.832574 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-85745844cf-bqdb4"
Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.834391 5048 util.go:48] "No ready sandbox for pod can be found.
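Need to start a new one" pod="openstack/horizon-8c8f677f9-25477"

The exitCode=0 ContainerDied event for machine-config-daemon above is the outcome of the liveness kill requested at 06:49:46.222251: the probe's GET against http://127.0.0.1:8798/health was refused, so the kubelet killed the container with a 600s grace period. A Go sketch of a liveness probe matching that logged endpoint; host, port, and path are from the log, while the timing fields are assumptions rather than the machine-config-operator's real configuration.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// Liveness probe sketch for machine-config-daemon. The explicit Host
// matches the URL in the logged failure; without it the kubelet would
// probe the pod IP instead of 127.0.0.1.
var mcdLiveness = corev1.Probe{
	ProbeHandler: corev1.ProbeHandler{
		HTTPGet: &corev1.HTTPGetAction{
			Host: "127.0.0.1",
			Path: "/health",
			Port: intstr.FromInt(8798),
		},
	},
	InitialDelaySeconds: 120, // assumed
	PeriodSeconds:       30,  // assumed
	FailureThreshold:    3,   // assumed
}

func main() { fmt.Printf("%+v\n", mcdLiveness) }
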
Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.834397 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8c8f677f9-25477" event={"ID":"9e51f251-b4ed-4920-b015-e9ac75c618b2","Type":"ContainerDied","Data":"72907b6fb08b9fc56df2a93bad5f6f708d367676e666716753e62bf234ce9bd1"}
Dec 13 06:49:46 crc kubenswrapper[5048]: E1213 06:49:46.836510 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-pzgrc" podUID="af6830b6-96f2-487f-ba02-93c7f01d0ceb"
Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.847737 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/468a6ac0-0a50-4f93-b213-0175c8b756c8-config-data" (OuterVolumeSpecName: "config-data") pod "468a6ac0-0a50-4f93-b213-0175c8b756c8" (UID: "468a6ac0-0a50-4f93-b213-0175c8b756c8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.878151 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-769d668ff7-qx7l7"]
Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.892731 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-769d668ff7-qx7l7"]
Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.893551 5048 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/468a6ac0-0a50-4f93-b213-0175c8b756c8-scripts\") on node \"crc\" DevicePath \"\""
Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.893598 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-74qtl\" (UniqueName: \"kubernetes.io/projected/468a6ac0-0a50-4f93-b213-0175c8b756c8-kube-api-access-74qtl\") on node \"crc\" DevicePath \"\""
Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.893617 5048 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/468a6ac0-0a50-4f93-b213-0175c8b756c8-logs\") on node \"crc\" DevicePath \"\""
Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.893666 5048 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" "
Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.893685 5048 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/468a6ac0-0a50-4f93-b213-0175c8b756c8-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.893700 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/468a6ac0-0a50-4f93-b213-0175c8b756c8-config-data\") on node \"crc\" DevicePath \"\""
Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.893716 5048 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/468a6ac0-0a50-4f93-b213-0175c8b756c8-httpd-run\") on node \"crc\" DevicePath \"\""
Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.921196 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-8c8f677f9-25477"]
Dec 13 06:49:46 crc kubenswrapper[5048]: I1213
06:49:46.923473 5048 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.930199 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-8c8f677f9-25477"] Dec 13 06:49:46 crc kubenswrapper[5048]: I1213 06:49:46.995895 5048 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.006766 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-85745844cf-bqdb4"] Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.014993 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-85745844cf-bqdb4"] Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.022377 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-2kjcf"] Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.030413 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-2kjcf"] Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.169581 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.185927 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.201256 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 13 06:49:47 crc kubenswrapper[5048]: E1213 06:49:47.201847 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b53097b1-3126-4e17-b910-d2b7f57ec87e" containerName="init" Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.201871 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="b53097b1-3126-4e17-b910-d2b7f57ec87e" containerName="init" Dec 13 06:49:47 crc kubenswrapper[5048]: E1213 06:49:47.201906 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b53097b1-3126-4e17-b910-d2b7f57ec87e" containerName="dnsmasq-dns" Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.201917 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="b53097b1-3126-4e17-b910-d2b7f57ec87e" containerName="dnsmasq-dns" Dec 13 06:49:47 crc kubenswrapper[5048]: E1213 06:49:47.201947 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="468a6ac0-0a50-4f93-b213-0175c8b756c8" containerName="glance-log" Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.201953 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="468a6ac0-0a50-4f93-b213-0175c8b756c8" containerName="glance-log" Dec 13 06:49:47 crc kubenswrapper[5048]: E1213 06:49:47.201967 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="468a6ac0-0a50-4f93-b213-0175c8b756c8" containerName="glance-httpd" Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.201975 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="468a6ac0-0a50-4f93-b213-0175c8b756c8" containerName="glance-httpd" Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.202144 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="468a6ac0-0a50-4f93-b213-0175c8b756c8" containerName="glance-log" Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.202162 5048 
memory_manager.go:354] "RemoveStaleState removing state" podUID="b53097b1-3126-4e17-b910-d2b7f57ec87e" containerName="dnsmasq-dns" Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.202178 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="468a6ac0-0a50-4f93-b213-0175c8b756c8" containerName="glance-httpd" Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.203339 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.205262 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.206227 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.210817 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.302922 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-scripts\") pod \"glance-default-internal-api-0\" (UID: \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.302986 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.303091 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-config-data\") pod \"glance-default-internal-api-0\" (UID: \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.303153 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.303192 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.303214 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bqcdl\" (UniqueName: \"kubernetes.io/projected/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-kube-api-access-bqcdl\") pod \"glance-default-internal-api-0\" (UID: \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.303279 5048 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-logs\") pod \"glance-default-internal-api-0\" (UID: \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.303350 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.404709 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.404773 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-scripts\") pod \"glance-default-internal-api-0\" (UID: \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.404819 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.404892 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-config-data\") pod \"glance-default-internal-api-0\" (UID: \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.404936 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.404969 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.404993 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bqcdl\" (UniqueName: \"kubernetes.io/projected/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-kube-api-access-bqcdl\") pod \"glance-default-internal-api-0\" (UID: \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.405028 5048 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-logs\") pod \"glance-default-internal-api-0\" (UID: \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.405215 5048 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-internal-api-0" Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.405313 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.411220 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-logs\") pod \"glance-default-internal-api-0\" (UID: \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.412299 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.412342 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-config-data\") pod \"glance-default-internal-api-0\" (UID: \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.425927 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.428861 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bqcdl\" (UniqueName: \"kubernetes.io/projected/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-kube-api-access-bqcdl\") pod \"glance-default-internal-api-0\" (UID: \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.442479 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-scripts\") pod \"glance-default-internal-api-0\" (UID: \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.463522 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod 
\"glance-default-internal-api-0\" (UID: \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:49:47 crc kubenswrapper[5048]: I1213 06:49:47.520402 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 13 06:49:48 crc kubenswrapper[5048]: I1213 06:49:48.011135 5048 scope.go:117] "RemoveContainer" containerID="2f4de111509142f7f86ffbd90cf48a33b61f59a9ab2c14cf35dec3067eed7529" Dec 13 06:49:48 crc kubenswrapper[5048]: E1213 06:49:48.041984 5048 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Dec 13 06:49:48 crc kubenswrapper[5048]: E1213 06:49:48.042156 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-n59rd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-9xjhb_openstack(8a4a2da1-cc7f-474f-baf7-16c352bd0708): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 13 06:49:48 crc kubenswrapper[5048]: E1213 06:49:48.043569 5048 pod_workers.go:1301] 
"Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-9xjhb" podUID="8a4a2da1-cc7f-474f-baf7-16c352bd0708" Dec 13 06:49:48 crc kubenswrapper[5048]: I1213 06:49:48.198113 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-7ksgw" Dec 13 06:49:48 crc kubenswrapper[5048]: I1213 06:49:48.209386 5048 scope.go:117] "RemoveContainer" containerID="6a3d15e2b73d49faf7e2bc2dde00bdf8e7146833acddabd1d2461771ec1f3cd7" Dec 13 06:49:48 crc kubenswrapper[5048]: I1213 06:49:48.281483 5048 scope.go:117] "RemoveContainer" containerID="1f0cc3781d25c5f8bd9c9357cecd9de13b6028fe940551bf836d1e3a290ec1e9" Dec 13 06:49:48 crc kubenswrapper[5048]: I1213 06:49:48.320227 5048 scope.go:117] "RemoveContainer" containerID="b78100e7e0c4b665e6330e2c03f1531c2cd133387b8c2c53260a2c5bf79c77e3" Dec 13 06:49:48 crc kubenswrapper[5048]: I1213 06:49:48.331565 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/34ff9369-df92-416e-a391-18b362cd491f-config\") pod \"34ff9369-df92-416e-a391-18b362cd491f\" (UID: \"34ff9369-df92-416e-a391-18b362cd491f\") " Dec 13 06:49:48 crc kubenswrapper[5048]: I1213 06:49:48.331633 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34ff9369-df92-416e-a391-18b362cd491f-combined-ca-bundle\") pod \"34ff9369-df92-416e-a391-18b362cd491f\" (UID: \"34ff9369-df92-416e-a391-18b362cd491f\") " Dec 13 06:49:48 crc kubenswrapper[5048]: I1213 06:49:48.331747 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r8lwt\" (UniqueName: \"kubernetes.io/projected/34ff9369-df92-416e-a391-18b362cd491f-kube-api-access-r8lwt\") pod \"34ff9369-df92-416e-a391-18b362cd491f\" (UID: \"34ff9369-df92-416e-a391-18b362cd491f\") " Dec 13 06:49:48 crc kubenswrapper[5048]: I1213 06:49:48.339686 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34ff9369-df92-416e-a391-18b362cd491f-kube-api-access-r8lwt" (OuterVolumeSpecName: "kube-api-access-r8lwt") pod "34ff9369-df92-416e-a391-18b362cd491f" (UID: "34ff9369-df92-416e-a391-18b362cd491f"). InnerVolumeSpecName "kube-api-access-r8lwt". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:49:48 crc kubenswrapper[5048]: I1213 06:49:48.360484 5048 scope.go:117] "RemoveContainer" containerID="3fa964f8f4ff55b5940f2d0d3446730b9defe0491e61ee9dbb0b557d14a262c5" Dec 13 06:49:48 crc kubenswrapper[5048]: I1213 06:49:48.368524 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34ff9369-df92-416e-a391-18b362cd491f-config" (OuterVolumeSpecName: "config") pod "34ff9369-df92-416e-a391-18b362cd491f" (UID: "34ff9369-df92-416e-a391-18b362cd491f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:49:48 crc kubenswrapper[5048]: I1213 06:49:48.372293 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34ff9369-df92-416e-a391-18b362cd491f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "34ff9369-df92-416e-a391-18b362cd491f" (UID: "34ff9369-df92-416e-a391-18b362cd491f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:49:48 crc kubenswrapper[5048]: I1213 06:49:48.389091 5048 scope.go:117] "RemoveContainer" containerID="9a118aa7c8475d5f77b0fe6d135f228d5c2b9bd4156b3d715d9a1fbe9b11231a" Dec 13 06:49:48 crc kubenswrapper[5048]: I1213 06:49:48.435644 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r8lwt\" (UniqueName: \"kubernetes.io/projected/34ff9369-df92-416e-a391-18b362cd491f-kube-api-access-r8lwt\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:48 crc kubenswrapper[5048]: I1213 06:49:48.435679 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/34ff9369-df92-416e-a391-18b362cd491f-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:48 crc kubenswrapper[5048]: I1213 06:49:48.435695 5048 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34ff9369-df92-416e-a391-18b362cd491f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:48 crc kubenswrapper[5048]: I1213 06:49:48.531794 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5fdc45567b-kg45h"] Dec 13 06:49:48 crc kubenswrapper[5048]: W1213 06:49:48.544717 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3c13197f_14c3_45a9_ba9c_bc89b80d6169.slice/crio-b0986a3e3cb62bf90ec7ac3c701c784c83e379ea416081bd25cafd19d23664d8 WatchSource:0}: Error finding container b0986a3e3cb62bf90ec7ac3c701c784c83e379ea416081bd25cafd19d23664d8: Status 404 returned error can't find the container with id b0986a3e3cb62bf90ec7ac3c701c784c83e379ea416081bd25cafd19d23664d8 Dec 13 06:49:48 crc kubenswrapper[5048]: I1213 06:49:48.585368 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00a24942-715c-4fbd-bb97-1128504ef182" path="/var/lib/kubelet/pods/00a24942-715c-4fbd-bb97-1128504ef182/volumes" Dec 13 06:49:48 crc kubenswrapper[5048]: I1213 06:49:48.585890 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2476d7cd-fb97-45d1-a67d-8fb867d8f296" path="/var/lib/kubelet/pods/2476d7cd-fb97-45d1-a67d-8fb867d8f296/volumes" Dec 13 06:49:48 crc kubenswrapper[5048]: I1213 06:49:48.586302 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="468a6ac0-0a50-4f93-b213-0175c8b756c8" path="/var/lib/kubelet/pods/468a6ac0-0a50-4f93-b213-0175c8b756c8/volumes" Dec 13 06:49:48 crc kubenswrapper[5048]: I1213 06:49:48.595595 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9e51f251-b4ed-4920-b015-e9ac75c618b2" path="/var/lib/kubelet/pods/9e51f251-b4ed-4920-b015-e9ac75c618b2/volumes" Dec 13 06:49:48 crc kubenswrapper[5048]: I1213 06:49:48.596262 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b53097b1-3126-4e17-b910-d2b7f57ec87e" path="/var/lib/kubelet/pods/b53097b1-3126-4e17-b910-d2b7f57ec87e/volumes" Dec 13 06:49:48 crc kubenswrapper[5048]: I1213 06:49:48.632057 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-657fc95f76-vznd4"] Dec 13 06:49:48 crc kubenswrapper[5048]: W1213 06:49:48.642242 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1a49463c_d974_4631_b6ef_3f88d734ac2d.slice/crio-521c69c2c6b811c6be8f71927e9ec343d4744f7a3e3eeff31511ff30f21a4725 WatchSource:0}: Error finding container 
521c69c2c6b811c6be8f71927e9ec343d4744f7a3e3eeff31511ff30f21a4725: Status 404 returned error can't find the container with id 521c69c2c6b811c6be8f71927e9ec343d4744f7a3e3eeff31511ff30f21a4725 Dec 13 06:49:48 crc kubenswrapper[5048]: I1213 06:49:48.645552 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-jzn5h"] Dec 13 06:49:48 crc kubenswrapper[5048]: I1213 06:49:48.705680 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 13 06:49:48 crc kubenswrapper[5048]: I1213 06:49:48.838371 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 13 06:49:48 crc kubenswrapper[5048]: W1213 06:49:48.867406 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podce81fe6a_857c_4bcd_ae82_b7a6f0280326.slice/crio-2d908f1317d286b8cd259d0be5c9e39bbe338024d2c2820e373066abd4ff061f WatchSource:0}: Error finding container 2d908f1317d286b8cd259d0be5c9e39bbe338024d2c2820e373066abd4ff061f: Status 404 returned error can't find the container with id 2d908f1317d286b8cd259d0be5c9e39bbe338024d2c2820e373066abd4ff061f Dec 13 06:49:48 crc kubenswrapper[5048]: I1213 06:49:48.878550 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-7ksgw" Dec 13 06:49:48 crc kubenswrapper[5048]: I1213 06:49:48.878652 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-7ksgw" event={"ID":"34ff9369-df92-416e-a391-18b362cd491f","Type":"ContainerDied","Data":"87e0da901c7b05f90769adc2ad9dccdec99143859109daa9dbc6106ff58fe181"} Dec 13 06:49:48 crc kubenswrapper[5048]: I1213 06:49:48.878679 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="87e0da901c7b05f90769adc2ad9dccdec99143859109daa9dbc6106ff58fe181" Dec 13 06:49:48 crc kubenswrapper[5048]: I1213 06:49:48.884547 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-jzn5h" event={"ID":"2d92d376-9d6c-404a-b1f5-a13a67276b6f","Type":"ContainerStarted","Data":"f658179f1045b16f7ce59fa79acb9bb08ab549060321280463d8096227c93159"} Dec 13 06:49:48 crc kubenswrapper[5048]: I1213 06:49:48.886588 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-657fc95f76-vznd4" event={"ID":"1a49463c-d974-4631-b6ef-3f88d734ac2d","Type":"ContainerStarted","Data":"521c69c2c6b811c6be8f71927e9ec343d4744f7a3e3eeff31511ff30f21a4725"} Dec 13 06:49:48 crc kubenswrapper[5048]: I1213 06:49:48.895996 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"12348a4c-feba-43e8-8f4f-bd2729ccd9ab","Type":"ContainerStarted","Data":"7aac0eac05f44ec94abd6a1fbc3a78b63b7f0ff8e37c36b7e92faf8ab69708d2"} Dec 13 06:49:48 crc kubenswrapper[5048]: I1213 06:49:48.915617 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" event={"ID":"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b","Type":"ContainerStarted","Data":"73b8468a0b14a8e6512874e20fb2d8442254eac77f67c3fab0272cfdb9a926da"} Dec 13 06:49:48 crc kubenswrapper[5048]: I1213 06:49:48.921012 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-jzn5h" podStartSLOduration=18.920985705 podStartE2EDuration="18.920985705s" podCreationTimestamp="2025-12-13 06:49:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:49:48.905560535 +0000 UTC m=+1222.772155116" watchObservedRunningTime="2025-12-13 06:49:48.920985705 +0000 UTC m=+1222.787580326" Dec 13 06:49:48 crc kubenswrapper[5048]: I1213 06:49:48.924608 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b","Type":"ContainerStarted","Data":"03d5d3b195f7ca1dd1901cf64b9fb2ba462e881422823475e43e1e4bb15b4f76"} Dec 13 06:49:48 crc kubenswrapper[5048]: I1213 06:49:48.934364 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-75nl7" event={"ID":"e351b290-4e5b-496d-94be-545f01ae8e15","Type":"ContainerStarted","Data":"471f7b0313c94298fd2e33c2b09c7fe2912c91488d77ebd1dcd62288fc6dc5f4"} Dec 13 06:49:48 crc kubenswrapper[5048]: I1213 06:49:48.956913 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fdc45567b-kg45h" event={"ID":"3c13197f-14c3-45a9-ba9c-bc89b80d6169","Type":"ContainerStarted","Data":"b0986a3e3cb62bf90ec7ac3c701c784c83e379ea416081bd25cafd19d23664d8"} Dec 13 06:49:48 crc kubenswrapper[5048]: E1213 06:49:48.964895 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-9xjhb" podUID="8a4a2da1-cc7f-474f-baf7-16c352bd0708" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.003999 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-75nl7" podStartSLOduration=5.495785651 podStartE2EDuration="35.003972366s" podCreationTimestamp="2025-12-13 06:49:14 +0000 UTC" firstStartedPulling="2025-12-13 06:49:16.448266768 +0000 UTC m=+1190.314861349" lastFinishedPulling="2025-12-13 06:49:45.956453473 +0000 UTC m=+1219.823048064" observedRunningTime="2025-12-13 06:49:48.978570994 +0000 UTC m=+1222.845165575" watchObservedRunningTime="2025-12-13 06:49:49.003972366 +0000 UTC m=+1222.870566947" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.097919 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-k4rmf"] Dec 13 06:49:49 crc kubenswrapper[5048]: E1213 06:49:49.098455 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34ff9369-df92-416e-a391-18b362cd491f" containerName="neutron-db-sync" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.098471 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="34ff9369-df92-416e-a391-18b362cd491f" containerName="neutron-db-sync" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.098896 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="34ff9369-df92-416e-a391-18b362cd491f" containerName="neutron-db-sync" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.100974 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b7b667979-k4rmf" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.112559 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-k4rmf"] Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.187427 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-7b89f46c6-sjtfz"] Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.190175 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7b89f46c6-sjtfz" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.193542 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.194020 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-hxzvm" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.194150 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.194275 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.208010 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7b89f46c6-sjtfz"] Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.271169 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fp4dj\" (UniqueName: \"kubernetes.io/projected/a6dbb4d5-4873-4bd5-a300-6bf8334f14db-kube-api-access-fp4dj\") pod \"neutron-7b89f46c6-sjtfz\" (UID: \"a6dbb4d5-4873-4bd5-a300-6bf8334f14db\") " pod="openstack/neutron-7b89f46c6-sjtfz" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.271226 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/830ced8e-31e0-474d-a13e-93d1825c2c8f-ovsdbserver-nb\") pod \"dnsmasq-dns-6b7b667979-k4rmf\" (UID: \"830ced8e-31e0-474d-a13e-93d1825c2c8f\") " pod="openstack/dnsmasq-dns-6b7b667979-k4rmf" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.271324 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/830ced8e-31e0-474d-a13e-93d1825c2c8f-ovsdbserver-sb\") pod \"dnsmasq-dns-6b7b667979-k4rmf\" (UID: \"830ced8e-31e0-474d-a13e-93d1825c2c8f\") " pod="openstack/dnsmasq-dns-6b7b667979-k4rmf" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.271359 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/830ced8e-31e0-474d-a13e-93d1825c2c8f-dns-swift-storage-0\") pod \"dnsmasq-dns-6b7b667979-k4rmf\" (UID: \"830ced8e-31e0-474d-a13e-93d1825c2c8f\") " pod="openstack/dnsmasq-dns-6b7b667979-k4rmf" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.271401 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/830ced8e-31e0-474d-a13e-93d1825c2c8f-dns-svc\") pod \"dnsmasq-dns-6b7b667979-k4rmf\" (UID: \"830ced8e-31e0-474d-a13e-93d1825c2c8f\") " pod="openstack/dnsmasq-dns-6b7b667979-k4rmf" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.271427 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a6dbb4d5-4873-4bd5-a300-6bf8334f14db-httpd-config\") pod \"neutron-7b89f46c6-sjtfz\" (UID: \"a6dbb4d5-4873-4bd5-a300-6bf8334f14db\") " pod="openstack/neutron-7b89f46c6-sjtfz" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.271566 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/830ced8e-31e0-474d-a13e-93d1825c2c8f-config\") pod \"dnsmasq-dns-6b7b667979-k4rmf\" (UID: \"830ced8e-31e0-474d-a13e-93d1825c2c8f\") " pod="openstack/dnsmasq-dns-6b7b667979-k4rmf" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.271689 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6dbb4d5-4873-4bd5-a300-6bf8334f14db-combined-ca-bundle\") pod \"neutron-7b89f46c6-sjtfz\" (UID: \"a6dbb4d5-4873-4bd5-a300-6bf8334f14db\") " pod="openstack/neutron-7b89f46c6-sjtfz" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.271726 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a6dbb4d5-4873-4bd5-a300-6bf8334f14db-config\") pod \"neutron-7b89f46c6-sjtfz\" (UID: \"a6dbb4d5-4873-4bd5-a300-6bf8334f14db\") " pod="openstack/neutron-7b89f46c6-sjtfz" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.271748 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w65ns\" (UniqueName: \"kubernetes.io/projected/830ced8e-31e0-474d-a13e-93d1825c2c8f-kube-api-access-w65ns\") pod \"dnsmasq-dns-6b7b667979-k4rmf\" (UID: \"830ced8e-31e0-474d-a13e-93d1825c2c8f\") " pod="openstack/dnsmasq-dns-6b7b667979-k4rmf" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.271864 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a6dbb4d5-4873-4bd5-a300-6bf8334f14db-ovndb-tls-certs\") pod \"neutron-7b89f46c6-sjtfz\" (UID: \"a6dbb4d5-4873-4bd5-a300-6bf8334f14db\") " pod="openstack/neutron-7b89f46c6-sjtfz" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.383191 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/830ced8e-31e0-474d-a13e-93d1825c2c8f-ovsdbserver-sb\") pod \"dnsmasq-dns-6b7b667979-k4rmf\" (UID: \"830ced8e-31e0-474d-a13e-93d1825c2c8f\") " pod="openstack/dnsmasq-dns-6b7b667979-k4rmf" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.383247 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/830ced8e-31e0-474d-a13e-93d1825c2c8f-dns-swift-storage-0\") pod \"dnsmasq-dns-6b7b667979-k4rmf\" (UID: \"830ced8e-31e0-474d-a13e-93d1825c2c8f\") " pod="openstack/dnsmasq-dns-6b7b667979-k4rmf" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.383288 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/830ced8e-31e0-474d-a13e-93d1825c2c8f-dns-svc\") pod \"dnsmasq-dns-6b7b667979-k4rmf\" (UID: \"830ced8e-31e0-474d-a13e-93d1825c2c8f\") " pod="openstack/dnsmasq-dns-6b7b667979-k4rmf" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.383395 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a6dbb4d5-4873-4bd5-a300-6bf8334f14db-httpd-config\") pod \"neutron-7b89f46c6-sjtfz\" (UID: \"a6dbb4d5-4873-4bd5-a300-6bf8334f14db\") " pod="openstack/neutron-7b89f46c6-sjtfz" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.383632 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/830ced8e-31e0-474d-a13e-93d1825c2c8f-config\") pod \"dnsmasq-dns-6b7b667979-k4rmf\" (UID: \"830ced8e-31e0-474d-a13e-93d1825c2c8f\") " pod="openstack/dnsmasq-dns-6b7b667979-k4rmf" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.383780 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6dbb4d5-4873-4bd5-a300-6bf8334f14db-combined-ca-bundle\") pod \"neutron-7b89f46c6-sjtfz\" (UID: \"a6dbb4d5-4873-4bd5-a300-6bf8334f14db\") " pod="openstack/neutron-7b89f46c6-sjtfz" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.383818 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a6dbb4d5-4873-4bd5-a300-6bf8334f14db-config\") pod \"neutron-7b89f46c6-sjtfz\" (UID: \"a6dbb4d5-4873-4bd5-a300-6bf8334f14db\") " pod="openstack/neutron-7b89f46c6-sjtfz" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.383866 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w65ns\" (UniqueName: \"kubernetes.io/projected/830ced8e-31e0-474d-a13e-93d1825c2c8f-kube-api-access-w65ns\") pod \"dnsmasq-dns-6b7b667979-k4rmf\" (UID: \"830ced8e-31e0-474d-a13e-93d1825c2c8f\") " pod="openstack/dnsmasq-dns-6b7b667979-k4rmf" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.383956 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a6dbb4d5-4873-4bd5-a300-6bf8334f14db-ovndb-tls-certs\") pod \"neutron-7b89f46c6-sjtfz\" (UID: \"a6dbb4d5-4873-4bd5-a300-6bf8334f14db\") " pod="openstack/neutron-7b89f46c6-sjtfz" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.384069 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fp4dj\" (UniqueName: \"kubernetes.io/projected/a6dbb4d5-4873-4bd5-a300-6bf8334f14db-kube-api-access-fp4dj\") pod \"neutron-7b89f46c6-sjtfz\" (UID: \"a6dbb4d5-4873-4bd5-a300-6bf8334f14db\") " pod="openstack/neutron-7b89f46c6-sjtfz" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.384092 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/830ced8e-31e0-474d-a13e-93d1825c2c8f-ovsdbserver-sb\") pod \"dnsmasq-dns-6b7b667979-k4rmf\" (UID: \"830ced8e-31e0-474d-a13e-93d1825c2c8f\") " pod="openstack/dnsmasq-dns-6b7b667979-k4rmf" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.384124 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/830ced8e-31e0-474d-a13e-93d1825c2c8f-ovsdbserver-nb\") pod \"dnsmasq-dns-6b7b667979-k4rmf\" (UID: \"830ced8e-31e0-474d-a13e-93d1825c2c8f\") " pod="openstack/dnsmasq-dns-6b7b667979-k4rmf" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.384183 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/830ced8e-31e0-474d-a13e-93d1825c2c8f-dns-swift-storage-0\") pod \"dnsmasq-dns-6b7b667979-k4rmf\" (UID: \"830ced8e-31e0-474d-a13e-93d1825c2c8f\") " pod="openstack/dnsmasq-dns-6b7b667979-k4rmf" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.384859 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/830ced8e-31e0-474d-a13e-93d1825c2c8f-config\") pod \"dnsmasq-dns-6b7b667979-k4rmf\" 
(UID: \"830ced8e-31e0-474d-a13e-93d1825c2c8f\") " pod="openstack/dnsmasq-dns-6b7b667979-k4rmf" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.397192 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a6dbb4d5-4873-4bd5-a300-6bf8334f14db-httpd-config\") pod \"neutron-7b89f46c6-sjtfz\" (UID: \"a6dbb4d5-4873-4bd5-a300-6bf8334f14db\") " pod="openstack/neutron-7b89f46c6-sjtfz" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.397231 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/a6dbb4d5-4873-4bd5-a300-6bf8334f14db-config\") pod \"neutron-7b89f46c6-sjtfz\" (UID: \"a6dbb4d5-4873-4bd5-a300-6bf8334f14db\") " pod="openstack/neutron-7b89f46c6-sjtfz" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.397570 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/830ced8e-31e0-474d-a13e-93d1825c2c8f-dns-svc\") pod \"dnsmasq-dns-6b7b667979-k4rmf\" (UID: \"830ced8e-31e0-474d-a13e-93d1825c2c8f\") " pod="openstack/dnsmasq-dns-6b7b667979-k4rmf" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.398738 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/830ced8e-31e0-474d-a13e-93d1825c2c8f-ovsdbserver-nb\") pod \"dnsmasq-dns-6b7b667979-k4rmf\" (UID: \"830ced8e-31e0-474d-a13e-93d1825c2c8f\") " pod="openstack/dnsmasq-dns-6b7b667979-k4rmf" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.401965 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a6dbb4d5-4873-4bd5-a300-6bf8334f14db-ovndb-tls-certs\") pod \"neutron-7b89f46c6-sjtfz\" (UID: \"a6dbb4d5-4873-4bd5-a300-6bf8334f14db\") " pod="openstack/neutron-7b89f46c6-sjtfz" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.429059 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6dbb4d5-4873-4bd5-a300-6bf8334f14db-combined-ca-bundle\") pod \"neutron-7b89f46c6-sjtfz\" (UID: \"a6dbb4d5-4873-4bd5-a300-6bf8334f14db\") " pod="openstack/neutron-7b89f46c6-sjtfz" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.437382 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fp4dj\" (UniqueName: \"kubernetes.io/projected/a6dbb4d5-4873-4bd5-a300-6bf8334f14db-kube-api-access-fp4dj\") pod \"neutron-7b89f46c6-sjtfz\" (UID: \"a6dbb4d5-4873-4bd5-a300-6bf8334f14db\") " pod="openstack/neutron-7b89f46c6-sjtfz" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.465801 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w65ns\" (UniqueName: \"kubernetes.io/projected/830ced8e-31e0-474d-a13e-93d1825c2c8f-kube-api-access-w65ns\") pod \"dnsmasq-dns-6b7b667979-k4rmf\" (UID: \"830ced8e-31e0-474d-a13e-93d1825c2c8f\") " pod="openstack/dnsmasq-dns-6b7b667979-k4rmf" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.529027 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7b89f46c6-sjtfz" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.758276 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b7b667979-k4rmf" Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.970696 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"12348a4c-feba-43e8-8f4f-bd2729ccd9ab","Type":"ContainerStarted","Data":"480807ce5aa47b3660060c4461dcd0437d03a5d34ecc5ebadc256d917d3ed43c"} Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.972162 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-jzn5h" event={"ID":"2d92d376-9d6c-404a-b1f5-a13a67276b6f","Type":"ContainerStarted","Data":"696cec693d739a77d0fa2773a7c4e66455dc16b82749b6448d55f0d5b8782b68"} Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.982039 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fdc45567b-kg45h" event={"ID":"3c13197f-14c3-45a9-ba9c-bc89b80d6169","Type":"ContainerStarted","Data":"97618be3b26b3b31a7f8fcb9a3ed26dd8767012b12160a1b25cdadbb069ffb87"} Dec 13 06:49:49 crc kubenswrapper[5048]: I1213 06:49:49.985870 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ce81fe6a-857c-4bcd-ae82-b7a6f0280326","Type":"ContainerStarted","Data":"2d908f1317d286b8cd259d0be5c9e39bbe338024d2c2820e373066abd4ff061f"} Dec 13 06:49:50 crc kubenswrapper[5048]: I1213 06:49:50.489200 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7b89f46c6-sjtfz"] Dec 13 06:49:50 crc kubenswrapper[5048]: I1213 06:49:50.539356 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5c79d794d7-2kjcf" podUID="b53097b1-3126-4e17-b910-d2b7f57ec87e" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.123:5353: i/o timeout" Dec 13 06:49:50 crc kubenswrapper[5048]: W1213 06:49:50.861962 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda6dbb4d5_4873_4bd5_a300_6bf8334f14db.slice/crio-0e257bdbbd34a08996fbf401438f52d6a87ed5fff22ad4822bed5e321801b1c8 WatchSource:0}: Error finding container 0e257bdbbd34a08996fbf401438f52d6a87ed5fff22ad4822bed5e321801b1c8: Status 404 returned error can't find the container with id 0e257bdbbd34a08996fbf401438f52d6a87ed5fff22ad4822bed5e321801b1c8 Dec 13 06:49:50 crc kubenswrapper[5048]: I1213 06:49:50.873751 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-k4rmf"] Dec 13 06:49:50 crc kubenswrapper[5048]: W1213 06:49:50.884648 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod830ced8e_31e0_474d_a13e_93d1825c2c8f.slice/crio-9189d8a81944443aa457792af83d6b296a46e60af5d34d1c17ccd4248db81eda WatchSource:0}: Error finding container 9189d8a81944443aa457792af83d6b296a46e60af5d34d1c17ccd4248db81eda: Status 404 returned error can't find the container with id 9189d8a81944443aa457792af83d6b296a46e60af5d34d1c17ccd4248db81eda Dec 13 06:49:51 crc kubenswrapper[5048]: I1213 06:49:51.102505 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-k4rmf" event={"ID":"830ced8e-31e0-474d-a13e-93d1825c2c8f","Type":"ContainerStarted","Data":"9189d8a81944443aa457792af83d6b296a46e60af5d34d1c17ccd4248db81eda"} Dec 13 06:49:51 crc kubenswrapper[5048]: I1213 06:49:51.114502 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" 
event={"ID":"ce81fe6a-857c-4bcd-ae82-b7a6f0280326","Type":"ContainerStarted","Data":"6a29f4af7eda17ec5252628950659560cb241ad301e31efa51a483f5ad18d122"} Dec 13 06:49:51 crc kubenswrapper[5048]: I1213 06:49:51.142873 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-657fc95f76-vznd4" event={"ID":"1a49463c-d974-4631-b6ef-3f88d734ac2d","Type":"ContainerStarted","Data":"52e8b9b3badb29fc40f1e5124f8c03c8c3543a4c0d9fd0a63e1483eae7a60be8"} Dec 13 06:49:51 crc kubenswrapper[5048]: I1213 06:49:51.174266 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7b89f46c6-sjtfz" event={"ID":"a6dbb4d5-4873-4bd5-a300-6bf8334f14db","Type":"ContainerStarted","Data":"0e257bdbbd34a08996fbf401438f52d6a87ed5fff22ad4822bed5e321801b1c8"} Dec 13 06:49:51 crc kubenswrapper[5048]: I1213 06:49:51.186302 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fdc45567b-kg45h" event={"ID":"3c13197f-14c3-45a9-ba9c-bc89b80d6169","Type":"ContainerStarted","Data":"a4c525fef07828f178bd4736adb2426b714431b369912c1d75d9b97962f6a924"} Dec 13 06:49:51 crc kubenswrapper[5048]: I1213 06:49:51.243760 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-5fdc45567b-kg45h" podStartSLOduration=24.699737845 podStartE2EDuration="25.243741919s" podCreationTimestamp="2025-12-13 06:49:26 +0000 UTC" firstStartedPulling="2025-12-13 06:49:48.547662512 +0000 UTC m=+1222.414257093" lastFinishedPulling="2025-12-13 06:49:49.091666586 +0000 UTC m=+1222.958261167" observedRunningTime="2025-12-13 06:49:51.22470187 +0000 UTC m=+1225.091296451" watchObservedRunningTime="2025-12-13 06:49:51.243741919 +0000 UTC m=+1225.110336500" Dec 13 06:49:52 crc kubenswrapper[5048]: I1213 06:49:52.208595 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7b89f46c6-sjtfz" event={"ID":"a6dbb4d5-4873-4bd5-a300-6bf8334f14db","Type":"ContainerStarted","Data":"d958edd44a5c7ad8c726d4d8fcc4015eac30080837930136ff0737d4c8849b05"} Dec 13 06:49:52 crc kubenswrapper[5048]: I1213 06:49:52.209953 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7b89f46c6-sjtfz" event={"ID":"a6dbb4d5-4873-4bd5-a300-6bf8334f14db","Type":"ContainerStarted","Data":"831a70e5ee0f1376097aa2d55a51702cb5c665cde4634627f05d7c49649253ad"} Dec 13 06:49:52 crc kubenswrapper[5048]: I1213 06:49:52.210064 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-7b89f46c6-sjtfz" Dec 13 06:49:52 crc kubenswrapper[5048]: I1213 06:49:52.219890 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b","Type":"ContainerStarted","Data":"69d56a5567066958fafe92b1b8664aff26f228b91e5c1972493bb8b42cce91b9"} Dec 13 06:49:52 crc kubenswrapper[5048]: I1213 06:49:52.230615 5048 generic.go:334] "Generic (PLEG): container finished" podID="e351b290-4e5b-496d-94be-545f01ae8e15" containerID="471f7b0313c94298fd2e33c2b09c7fe2912c91488d77ebd1dcd62288fc6dc5f4" exitCode=0 Dec 13 06:49:52 crc kubenswrapper[5048]: I1213 06:49:52.230695 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-75nl7" event={"ID":"e351b290-4e5b-496d-94be-545f01ae8e15","Type":"ContainerDied","Data":"471f7b0313c94298fd2e33c2b09c7fe2912c91488d77ebd1dcd62288fc6dc5f4"} Dec 13 06:49:52 crc kubenswrapper[5048]: I1213 06:49:52.236835 5048 generic.go:334] "Generic (PLEG): container finished" podID="830ced8e-31e0-474d-a13e-93d1825c2c8f" 
containerID="5ef04b3283502adc5b482e11faccbf5090cd26fcbece944aefc1b0b0ff7bae6f" exitCode=0 Dec 13 06:49:52 crc kubenswrapper[5048]: I1213 06:49:52.236915 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-k4rmf" event={"ID":"830ced8e-31e0-474d-a13e-93d1825c2c8f","Type":"ContainerDied","Data":"5ef04b3283502adc5b482e11faccbf5090cd26fcbece944aefc1b0b0ff7bae6f"} Dec 13 06:49:52 crc kubenswrapper[5048]: I1213 06:49:52.239425 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-7b89f46c6-sjtfz" podStartSLOduration=3.239381 podStartE2EDuration="3.239381s" podCreationTimestamp="2025-12-13 06:49:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:49:52.235505614 +0000 UTC m=+1226.102100215" watchObservedRunningTime="2025-12-13 06:49:52.239381 +0000 UTC m=+1226.105975581" Dec 13 06:49:52 crc kubenswrapper[5048]: I1213 06:49:52.254163 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ce81fe6a-857c-4bcd-ae82-b7a6f0280326","Type":"ContainerStarted","Data":"edcbf41f27739464565246ff6ee05b468e763fb695031f3c1e230ea539bc72a2"} Dec 13 06:49:52 crc kubenswrapper[5048]: I1213 06:49:52.266047 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"12348a4c-feba-43e8-8f4f-bd2729ccd9ab","Type":"ContainerStarted","Data":"0918df1446a8bbc0a0db551d1e60cec00bef8b44cc388416436a46a2f719cf62"} Dec 13 06:49:52 crc kubenswrapper[5048]: I1213 06:49:52.277187 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-657fc95f76-vznd4" event={"ID":"1a49463c-d974-4631-b6ef-3f88d734ac2d","Type":"ContainerStarted","Data":"663fa9604feba56cc332633b567ef631d92e429593936ccd577dd9d21ecf8aaa"} Dec 13 06:49:52 crc kubenswrapper[5048]: I1213 06:49:52.517247 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=17.517233521 podStartE2EDuration="17.517233521s" podCreationTimestamp="2025-12-13 06:49:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:49:52.514961639 +0000 UTC m=+1226.381556220" watchObservedRunningTime="2025-12-13 06:49:52.517233521 +0000 UTC m=+1226.383828092" Dec 13 06:49:52 crc kubenswrapper[5048]: I1213 06:49:52.619545 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-657fc95f76-vznd4" podStartSLOduration=25.939664501 podStartE2EDuration="26.619526968s" podCreationTimestamp="2025-12-13 06:49:26 +0000 UTC" firstStartedPulling="2025-12-13 06:49:48.6470594 +0000 UTC m=+1222.513653981" lastFinishedPulling="2025-12-13 06:49:49.326921867 +0000 UTC m=+1223.193516448" observedRunningTime="2025-12-13 06:49:52.614007898 +0000 UTC m=+1226.480602489" watchObservedRunningTime="2025-12-13 06:49:52.619526968 +0000 UTC m=+1226.486121559" Dec 13 06:49:52 crc kubenswrapper[5048]: I1213 06:49:52.632855 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=5.632836581 podStartE2EDuration="5.632836581s" podCreationTimestamp="2025-12-13 06:49:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:49:52.573800903 
+0000 UTC m=+1226.440395504" watchObservedRunningTime="2025-12-13 06:49:52.632836581 +0000 UTC m=+1226.499431162" Dec 13 06:49:53 crc kubenswrapper[5048]: I1213 06:49:53.030948 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-6465fd554f-k9lkr"] Dec 13 06:49:53 crc kubenswrapper[5048]: I1213 06:49:53.033084 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6465fd554f-k9lkr" Dec 13 06:49:53 crc kubenswrapper[5048]: I1213 06:49:53.035814 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Dec 13 06:49:53 crc kubenswrapper[5048]: I1213 06:49:53.036099 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Dec 13 06:49:53 crc kubenswrapper[5048]: I1213 06:49:53.040489 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6465fd554f-k9lkr"] Dec 13 06:49:53 crc kubenswrapper[5048]: I1213 06:49:53.320459 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4a6223fc-c3a5-462d-b61d-ebd353bbe7ca-config\") pod \"neutron-6465fd554f-k9lkr\" (UID: \"4a6223fc-c3a5-462d-b61d-ebd353bbe7ca\") " pod="openstack/neutron-6465fd554f-k9lkr" Dec 13 06:49:53 crc kubenswrapper[5048]: I1213 06:49:53.320543 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a6223fc-c3a5-462d-b61d-ebd353bbe7ca-public-tls-certs\") pod \"neutron-6465fd554f-k9lkr\" (UID: \"4a6223fc-c3a5-462d-b61d-ebd353bbe7ca\") " pod="openstack/neutron-6465fd554f-k9lkr" Dec 13 06:49:53 crc kubenswrapper[5048]: I1213 06:49:53.320571 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4a6223fc-c3a5-462d-b61d-ebd353bbe7ca-httpd-config\") pod \"neutron-6465fd554f-k9lkr\" (UID: \"4a6223fc-c3a5-462d-b61d-ebd353bbe7ca\") " pod="openstack/neutron-6465fd554f-k9lkr" Dec 13 06:49:53 crc kubenswrapper[5048]: I1213 06:49:53.320607 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a6223fc-c3a5-462d-b61d-ebd353bbe7ca-internal-tls-certs\") pod \"neutron-6465fd554f-k9lkr\" (UID: \"4a6223fc-c3a5-462d-b61d-ebd353bbe7ca\") " pod="openstack/neutron-6465fd554f-k9lkr" Dec 13 06:49:53 crc kubenswrapper[5048]: I1213 06:49:53.320672 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a6223fc-c3a5-462d-b61d-ebd353bbe7ca-combined-ca-bundle\") pod \"neutron-6465fd554f-k9lkr\" (UID: \"4a6223fc-c3a5-462d-b61d-ebd353bbe7ca\") " pod="openstack/neutron-6465fd554f-k9lkr" Dec 13 06:49:53 crc kubenswrapper[5048]: I1213 06:49:53.320725 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a6223fc-c3a5-462d-b61d-ebd353bbe7ca-ovndb-tls-certs\") pod \"neutron-6465fd554f-k9lkr\" (UID: \"4a6223fc-c3a5-462d-b61d-ebd353bbe7ca\") " pod="openstack/neutron-6465fd554f-k9lkr" Dec 13 06:49:53 crc kubenswrapper[5048]: I1213 06:49:53.320768 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xsspq\" (UniqueName: 
\"kubernetes.io/projected/4a6223fc-c3a5-462d-b61d-ebd353bbe7ca-kube-api-access-xsspq\") pod \"neutron-6465fd554f-k9lkr\" (UID: \"4a6223fc-c3a5-462d-b61d-ebd353bbe7ca\") " pod="openstack/neutron-6465fd554f-k9lkr" Dec 13 06:49:53 crc kubenswrapper[5048]: I1213 06:49:53.353421 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-k4rmf" event={"ID":"830ced8e-31e0-474d-a13e-93d1825c2c8f","Type":"ContainerStarted","Data":"1131c364d46f9f2732f0aa2dcd63b075c6fbc4207d686609f0cdfcc6f2a42f3f"} Dec 13 06:49:53 crc kubenswrapper[5048]: I1213 06:49:53.389692 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6b7b667979-k4rmf" podStartSLOduration=4.389670724 podStartE2EDuration="4.389670724s" podCreationTimestamp="2025-12-13 06:49:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:49:53.380827163 +0000 UTC m=+1227.247421744" watchObservedRunningTime="2025-12-13 06:49:53.389670724 +0000 UTC m=+1227.256265305" Dec 13 06:49:53 crc kubenswrapper[5048]: I1213 06:49:53.421835 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a6223fc-c3a5-462d-b61d-ebd353bbe7ca-internal-tls-certs\") pod \"neutron-6465fd554f-k9lkr\" (UID: \"4a6223fc-c3a5-462d-b61d-ebd353bbe7ca\") " pod="openstack/neutron-6465fd554f-k9lkr" Dec 13 06:49:53 crc kubenswrapper[5048]: I1213 06:49:53.421881 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a6223fc-c3a5-462d-b61d-ebd353bbe7ca-combined-ca-bundle\") pod \"neutron-6465fd554f-k9lkr\" (UID: \"4a6223fc-c3a5-462d-b61d-ebd353bbe7ca\") " pod="openstack/neutron-6465fd554f-k9lkr" Dec 13 06:49:53 crc kubenswrapper[5048]: I1213 06:49:53.421916 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a6223fc-c3a5-462d-b61d-ebd353bbe7ca-ovndb-tls-certs\") pod \"neutron-6465fd554f-k9lkr\" (UID: \"4a6223fc-c3a5-462d-b61d-ebd353bbe7ca\") " pod="openstack/neutron-6465fd554f-k9lkr" Dec 13 06:49:53 crc kubenswrapper[5048]: I1213 06:49:53.421941 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xsspq\" (UniqueName: \"kubernetes.io/projected/4a6223fc-c3a5-462d-b61d-ebd353bbe7ca-kube-api-access-xsspq\") pod \"neutron-6465fd554f-k9lkr\" (UID: \"4a6223fc-c3a5-462d-b61d-ebd353bbe7ca\") " pod="openstack/neutron-6465fd554f-k9lkr" Dec 13 06:49:53 crc kubenswrapper[5048]: I1213 06:49:53.421990 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4a6223fc-c3a5-462d-b61d-ebd353bbe7ca-config\") pod \"neutron-6465fd554f-k9lkr\" (UID: \"4a6223fc-c3a5-462d-b61d-ebd353bbe7ca\") " pod="openstack/neutron-6465fd554f-k9lkr" Dec 13 06:49:53 crc kubenswrapper[5048]: I1213 06:49:53.422034 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a6223fc-c3a5-462d-b61d-ebd353bbe7ca-public-tls-certs\") pod \"neutron-6465fd554f-k9lkr\" (UID: \"4a6223fc-c3a5-462d-b61d-ebd353bbe7ca\") " pod="openstack/neutron-6465fd554f-k9lkr" Dec 13 06:49:53 crc kubenswrapper[5048]: I1213 06:49:53.422052 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" 
(UniqueName: \"kubernetes.io/secret/4a6223fc-c3a5-462d-b61d-ebd353bbe7ca-httpd-config\") pod \"neutron-6465fd554f-k9lkr\" (UID: \"4a6223fc-c3a5-462d-b61d-ebd353bbe7ca\") " pod="openstack/neutron-6465fd554f-k9lkr" Dec 13 06:49:53 crc kubenswrapper[5048]: I1213 06:49:53.431994 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a6223fc-c3a5-462d-b61d-ebd353bbe7ca-ovndb-tls-certs\") pod \"neutron-6465fd554f-k9lkr\" (UID: \"4a6223fc-c3a5-462d-b61d-ebd353bbe7ca\") " pod="openstack/neutron-6465fd554f-k9lkr" Dec 13 06:49:53 crc kubenswrapper[5048]: I1213 06:49:53.433600 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a6223fc-c3a5-462d-b61d-ebd353bbe7ca-public-tls-certs\") pod \"neutron-6465fd554f-k9lkr\" (UID: \"4a6223fc-c3a5-462d-b61d-ebd353bbe7ca\") " pod="openstack/neutron-6465fd554f-k9lkr" Dec 13 06:49:53 crc kubenswrapper[5048]: I1213 06:49:53.433837 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/4a6223fc-c3a5-462d-b61d-ebd353bbe7ca-config\") pod \"neutron-6465fd554f-k9lkr\" (UID: \"4a6223fc-c3a5-462d-b61d-ebd353bbe7ca\") " pod="openstack/neutron-6465fd554f-k9lkr" Dec 13 06:49:53 crc kubenswrapper[5048]: I1213 06:49:53.434144 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a6223fc-c3a5-462d-b61d-ebd353bbe7ca-combined-ca-bundle\") pod \"neutron-6465fd554f-k9lkr\" (UID: \"4a6223fc-c3a5-462d-b61d-ebd353bbe7ca\") " pod="openstack/neutron-6465fd554f-k9lkr" Dec 13 06:49:53 crc kubenswrapper[5048]: I1213 06:49:53.447047 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a6223fc-c3a5-462d-b61d-ebd353bbe7ca-internal-tls-certs\") pod \"neutron-6465fd554f-k9lkr\" (UID: \"4a6223fc-c3a5-462d-b61d-ebd353bbe7ca\") " pod="openstack/neutron-6465fd554f-k9lkr" Dec 13 06:49:53 crc kubenswrapper[5048]: I1213 06:49:53.447239 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4a6223fc-c3a5-462d-b61d-ebd353bbe7ca-httpd-config\") pod \"neutron-6465fd554f-k9lkr\" (UID: \"4a6223fc-c3a5-462d-b61d-ebd353bbe7ca\") " pod="openstack/neutron-6465fd554f-k9lkr" Dec 13 06:49:53 crc kubenswrapper[5048]: I1213 06:49:53.453352 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xsspq\" (UniqueName: \"kubernetes.io/projected/4a6223fc-c3a5-462d-b61d-ebd353bbe7ca-kube-api-access-xsspq\") pod \"neutron-6465fd554f-k9lkr\" (UID: \"4a6223fc-c3a5-462d-b61d-ebd353bbe7ca\") " pod="openstack/neutron-6465fd554f-k9lkr" Dec 13 06:49:53 crc kubenswrapper[5048]: I1213 06:49:53.653115 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6465fd554f-k9lkr" Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.247830 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6465fd554f-k9lkr"] Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.251579 5048 util.go:48] "No ready sandbox for pod can be found. 
Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.341036 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4qsj7\" (UniqueName: \"kubernetes.io/projected/e351b290-4e5b-496d-94be-545f01ae8e15-kube-api-access-4qsj7\") pod \"e351b290-4e5b-496d-94be-545f01ae8e15\" (UID: \"e351b290-4e5b-496d-94be-545f01ae8e15\") "
Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.341134 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e351b290-4e5b-496d-94be-545f01ae8e15-config-data\") pod \"e351b290-4e5b-496d-94be-545f01ae8e15\" (UID: \"e351b290-4e5b-496d-94be-545f01ae8e15\") "
Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.341174 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e351b290-4e5b-496d-94be-545f01ae8e15-logs\") pod \"e351b290-4e5b-496d-94be-545f01ae8e15\" (UID: \"e351b290-4e5b-496d-94be-545f01ae8e15\") "
Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.341303 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e351b290-4e5b-496d-94be-545f01ae8e15-scripts\") pod \"e351b290-4e5b-496d-94be-545f01ae8e15\" (UID: \"e351b290-4e5b-496d-94be-545f01ae8e15\") "
Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.341320 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e351b290-4e5b-496d-94be-545f01ae8e15-combined-ca-bundle\") pod \"e351b290-4e5b-496d-94be-545f01ae8e15\" (UID: \"e351b290-4e5b-496d-94be-545f01ae8e15\") "
Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.342408 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e351b290-4e5b-496d-94be-545f01ae8e15-logs" (OuterVolumeSpecName: "logs") pod "e351b290-4e5b-496d-94be-545f01ae8e15" (UID: "e351b290-4e5b-496d-94be-545f01ae8e15"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.350665 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e351b290-4e5b-496d-94be-545f01ae8e15-kube-api-access-4qsj7" (OuterVolumeSpecName: "kube-api-access-4qsj7") pod "e351b290-4e5b-496d-94be-545f01ae8e15" (UID: "e351b290-4e5b-496d-94be-545f01ae8e15"). InnerVolumeSpecName "kube-api-access-4qsj7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.375662 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e351b290-4e5b-496d-94be-545f01ae8e15-scripts" (OuterVolumeSpecName: "scripts") pod "e351b290-4e5b-496d-94be-545f01ae8e15" (UID: "e351b290-4e5b-496d-94be-545f01ae8e15"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.387540 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e351b290-4e5b-496d-94be-545f01ae8e15-config-data" (OuterVolumeSpecName: "config-data") pod "e351b290-4e5b-496d-94be-545f01ae8e15" (UID: "e351b290-4e5b-496d-94be-545f01ae8e15"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.414227 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-75nl7" event={"ID":"e351b290-4e5b-496d-94be-545f01ae8e15","Type":"ContainerDied","Data":"c80d529b40b9df3f1eec4e8f22d881adcca7fbb134ff6a6d02fd540195206dd1"} Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.414297 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c80d529b40b9df3f1eec4e8f22d881adcca7fbb134ff6a6d02fd540195206dd1" Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.414397 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-75nl7" Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.430495 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6465fd554f-k9lkr" event={"ID":"4a6223fc-c3a5-462d-b61d-ebd353bbe7ca","Type":"ContainerStarted","Data":"bd8d782d165f5fced246034e1b3fa76af09311cc3bb7ed2995f1fe272f821186"} Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.430900 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6b7b667979-k4rmf" Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.443938 5048 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e351b290-4e5b-496d-94be-545f01ae8e15-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.443989 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4qsj7\" (UniqueName: \"kubernetes.io/projected/e351b290-4e5b-496d-94be-545f01ae8e15-kube-api-access-4qsj7\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.444008 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e351b290-4e5b-496d-94be-545f01ae8e15-config-data\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.444020 5048 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e351b290-4e5b-496d-94be-545f01ae8e15-logs\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.456588 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e351b290-4e5b-496d-94be-545f01ae8e15-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e351b290-4e5b-496d-94be-545f01ae8e15" (UID: "e351b290-4e5b-496d-94be-545f01ae8e15"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.545870 5048 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e351b290-4e5b-496d-94be-545f01ae8e15-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.618965 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-7b9cc68dcb-22pkt"] Dec 13 06:49:54 crc kubenswrapper[5048]: E1213 06:49:54.619300 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e351b290-4e5b-496d-94be-545f01ae8e15" containerName="placement-db-sync" Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.619317 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="e351b290-4e5b-496d-94be-545f01ae8e15" containerName="placement-db-sync" Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.619557 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="e351b290-4e5b-496d-94be-545f01ae8e15" containerName="placement-db-sync" Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.620682 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-7b9cc68dcb-22pkt" Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.627144 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.627183 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.739240 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fb3e7c2a-93fb-4bf8-8447-6d4be22a0760-scripts\") pod \"placement-7b9cc68dcb-22pkt\" (UID: \"fb3e7c2a-93fb-4bf8-8447-6d4be22a0760\") " pod="openstack/placement-7b9cc68dcb-22pkt" Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.739294 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-btnwd\" (UniqueName: \"kubernetes.io/projected/fb3e7c2a-93fb-4bf8-8447-6d4be22a0760-kube-api-access-btnwd\") pod \"placement-7b9cc68dcb-22pkt\" (UID: \"fb3e7c2a-93fb-4bf8-8447-6d4be22a0760\") " pod="openstack/placement-7b9cc68dcb-22pkt" Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.739333 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb3e7c2a-93fb-4bf8-8447-6d4be22a0760-combined-ca-bundle\") pod \"placement-7b9cc68dcb-22pkt\" (UID: \"fb3e7c2a-93fb-4bf8-8447-6d4be22a0760\") " pod="openstack/placement-7b9cc68dcb-22pkt" Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.739420 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb3e7c2a-93fb-4bf8-8447-6d4be22a0760-internal-tls-certs\") pod \"placement-7b9cc68dcb-22pkt\" (UID: \"fb3e7c2a-93fb-4bf8-8447-6d4be22a0760\") " pod="openstack/placement-7b9cc68dcb-22pkt" Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.739526 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fb3e7c2a-93fb-4bf8-8447-6d4be22a0760-logs\") pod \"placement-7b9cc68dcb-22pkt\" (UID: 
\"fb3e7c2a-93fb-4bf8-8447-6d4be22a0760\") " pod="openstack/placement-7b9cc68dcb-22pkt" Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.739559 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb3e7c2a-93fb-4bf8-8447-6d4be22a0760-config-data\") pod \"placement-7b9cc68dcb-22pkt\" (UID: \"fb3e7c2a-93fb-4bf8-8447-6d4be22a0760\") " pod="openstack/placement-7b9cc68dcb-22pkt" Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.739603 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb3e7c2a-93fb-4bf8-8447-6d4be22a0760-public-tls-certs\") pod \"placement-7b9cc68dcb-22pkt\" (UID: \"fb3e7c2a-93fb-4bf8-8447-6d4be22a0760\") " pod="openstack/placement-7b9cc68dcb-22pkt" Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.766739 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-7b9cc68dcb-22pkt"] Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.841348 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb3e7c2a-93fb-4bf8-8447-6d4be22a0760-config-data\") pod \"placement-7b9cc68dcb-22pkt\" (UID: \"fb3e7c2a-93fb-4bf8-8447-6d4be22a0760\") " pod="openstack/placement-7b9cc68dcb-22pkt" Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.841412 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb3e7c2a-93fb-4bf8-8447-6d4be22a0760-public-tls-certs\") pod \"placement-7b9cc68dcb-22pkt\" (UID: \"fb3e7c2a-93fb-4bf8-8447-6d4be22a0760\") " pod="openstack/placement-7b9cc68dcb-22pkt" Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.841497 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fb3e7c2a-93fb-4bf8-8447-6d4be22a0760-scripts\") pod \"placement-7b9cc68dcb-22pkt\" (UID: \"fb3e7c2a-93fb-4bf8-8447-6d4be22a0760\") " pod="openstack/placement-7b9cc68dcb-22pkt" Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.841522 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-btnwd\" (UniqueName: \"kubernetes.io/projected/fb3e7c2a-93fb-4bf8-8447-6d4be22a0760-kube-api-access-btnwd\") pod \"placement-7b9cc68dcb-22pkt\" (UID: \"fb3e7c2a-93fb-4bf8-8447-6d4be22a0760\") " pod="openstack/placement-7b9cc68dcb-22pkt" Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.841541 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb3e7c2a-93fb-4bf8-8447-6d4be22a0760-combined-ca-bundle\") pod \"placement-7b9cc68dcb-22pkt\" (UID: \"fb3e7c2a-93fb-4bf8-8447-6d4be22a0760\") " pod="openstack/placement-7b9cc68dcb-22pkt" Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.841574 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb3e7c2a-93fb-4bf8-8447-6d4be22a0760-internal-tls-certs\") pod \"placement-7b9cc68dcb-22pkt\" (UID: \"fb3e7c2a-93fb-4bf8-8447-6d4be22a0760\") " pod="openstack/placement-7b9cc68dcb-22pkt" Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.841630 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/fb3e7c2a-93fb-4bf8-8447-6d4be22a0760-logs\") pod \"placement-7b9cc68dcb-22pkt\" (UID: \"fb3e7c2a-93fb-4bf8-8447-6d4be22a0760\") " pod="openstack/placement-7b9cc68dcb-22pkt" Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.842160 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fb3e7c2a-93fb-4bf8-8447-6d4be22a0760-logs\") pod \"placement-7b9cc68dcb-22pkt\" (UID: \"fb3e7c2a-93fb-4bf8-8447-6d4be22a0760\") " pod="openstack/placement-7b9cc68dcb-22pkt" Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.847215 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb3e7c2a-93fb-4bf8-8447-6d4be22a0760-combined-ca-bundle\") pod \"placement-7b9cc68dcb-22pkt\" (UID: \"fb3e7c2a-93fb-4bf8-8447-6d4be22a0760\") " pod="openstack/placement-7b9cc68dcb-22pkt" Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.847278 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb3e7c2a-93fb-4bf8-8447-6d4be22a0760-internal-tls-certs\") pod \"placement-7b9cc68dcb-22pkt\" (UID: \"fb3e7c2a-93fb-4bf8-8447-6d4be22a0760\") " pod="openstack/placement-7b9cc68dcb-22pkt" Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.858332 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fb3e7c2a-93fb-4bf8-8447-6d4be22a0760-scripts\") pod \"placement-7b9cc68dcb-22pkt\" (UID: \"fb3e7c2a-93fb-4bf8-8447-6d4be22a0760\") " pod="openstack/placement-7b9cc68dcb-22pkt" Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.858370 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb3e7c2a-93fb-4bf8-8447-6d4be22a0760-public-tls-certs\") pod \"placement-7b9cc68dcb-22pkt\" (UID: \"fb3e7c2a-93fb-4bf8-8447-6d4be22a0760\") " pod="openstack/placement-7b9cc68dcb-22pkt" Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.858934 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb3e7c2a-93fb-4bf8-8447-6d4be22a0760-config-data\") pod \"placement-7b9cc68dcb-22pkt\" (UID: \"fb3e7c2a-93fb-4bf8-8447-6d4be22a0760\") " pod="openstack/placement-7b9cc68dcb-22pkt" Dec 13 06:49:54 crc kubenswrapper[5048]: I1213 06:49:54.863675 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-btnwd\" (UniqueName: \"kubernetes.io/projected/fb3e7c2a-93fb-4bf8-8447-6d4be22a0760-kube-api-access-btnwd\") pod \"placement-7b9cc68dcb-22pkt\" (UID: \"fb3e7c2a-93fb-4bf8-8447-6d4be22a0760\") " pod="openstack/placement-7b9cc68dcb-22pkt" Dec 13 06:49:55 crc kubenswrapper[5048]: I1213 06:49:55.069921 5048 util.go:30] "No sandbox for pod can be found. 
Dec 13 06:49:55 crc kubenswrapper[5048]: I1213 06:49:55.790137 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-7b9cc68dcb-22pkt"]
Dec 13 06:49:56 crc kubenswrapper[5048]: I1213 06:49:56.135216 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Dec 13 06:49:56 crc kubenswrapper[5048]: I1213 06:49:56.135471 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Dec 13 06:49:56 crc kubenswrapper[5048]: I1213 06:49:56.194616 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Dec 13 06:49:56 crc kubenswrapper[5048]: I1213 06:49:56.199917 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Dec 13 06:49:56 crc kubenswrapper[5048]: I1213 06:49:56.587817 5048 generic.go:334] "Generic (PLEG): container finished" podID="2d92d376-9d6c-404a-b1f5-a13a67276b6f" containerID="696cec693d739a77d0fa2773a7c4e66455dc16b82749b6448d55f0d5b8782b68" exitCode=0
Dec 13 06:49:56 crc kubenswrapper[5048]: I1213 06:49:56.594148 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-jzn5h" event={"ID":"2d92d376-9d6c-404a-b1f5-a13a67276b6f","Type":"ContainerDied","Data":"696cec693d739a77d0fa2773a7c4e66455dc16b82749b6448d55f0d5b8782b68"}
Dec 13 06:49:56 crc kubenswrapper[5048]: I1213 06:49:56.594189 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7b9cc68dcb-22pkt" event={"ID":"fb3e7c2a-93fb-4bf8-8447-6d4be22a0760","Type":"ContainerStarted","Data":"3dce05278305f1d387273301bf6e97da06c15fed242545885f375a365ae54527"}
Dec 13 06:49:56 crc kubenswrapper[5048]: I1213 06:49:56.594199 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7b9cc68dcb-22pkt" event={"ID":"fb3e7c2a-93fb-4bf8-8447-6d4be22a0760","Type":"ContainerStarted","Data":"4d0e524321e0c3d10dd891b064677a7ed5889305e53afaca5cfc2ffb0f1c3f79"}
Dec 13 06:49:56 crc kubenswrapper[5048]: I1213 06:49:56.595618 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6465fd554f-k9lkr" event={"ID":"4a6223fc-c3a5-462d-b61d-ebd353bbe7ca","Type":"ContainerStarted","Data":"ce761408c0e316c351027f8535bc2bc0482293c1961086e4debdb8c3373ff6bd"}
Dec 13 06:49:56 crc kubenswrapper[5048]: I1213 06:49:56.595696 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6465fd554f-k9lkr" event={"ID":"4a6223fc-c3a5-462d-b61d-ebd353bbe7ca","Type":"ContainerStarted","Data":"bf846c658392ee5e798aa2b5453196feff77dadabeb8afa3970eb10ec6a1077f"}
Dec 13 06:49:56 crc kubenswrapper[5048]: I1213 06:49:56.596012 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Dec 13 06:49:56 crc kubenswrapper[5048]: I1213 06:49:56.596029 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Dec 13 06:49:56 crc kubenswrapper[5048]: I1213 06:49:56.596039 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-6465fd554f-k9lkr"
Dec 13 06:49:56 crc kubenswrapper[5048]: I1213 06:49:56.673628 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-6465fd554f-k9lkr" podStartSLOduration=3.673605588 podStartE2EDuration="3.673605588s" podCreationTimestamp="2025-12-13 06:49:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:49:56.669467665 +0000 UTC m=+1230.536062256" watchObservedRunningTime="2025-12-13 06:49:56.673605588 +0000 UTC m=+1230.540200169"
Dec 13 06:49:56 crc kubenswrapper[5048]: I1213 06:49:56.792081 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-5fdc45567b-kg45h"
Dec 13 06:49:56 crc kubenswrapper[5048]: I1213 06:49:56.792138 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-5fdc45567b-kg45h"
Dec 13 06:49:56 crc kubenswrapper[5048]: I1213 06:49:56.945221 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-657fc95f76-vznd4"
Dec 13 06:49:56 crc kubenswrapper[5048]: I1213 06:49:56.945278 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-657fc95f76-vznd4"
Dec 13 06:49:57 crc kubenswrapper[5048]: I1213 06:49:57.521353 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Dec 13 06:49:57 crc kubenswrapper[5048]: I1213 06:49:57.521856 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Dec 13 06:49:57 crc kubenswrapper[5048]: I1213 06:49:57.582001 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Dec 13 06:49:57 crc kubenswrapper[5048]: I1213 06:49:57.594926 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Dec 13 06:49:57 crc kubenswrapper[5048]: I1213 06:49:57.607658 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Dec 13 06:49:57 crc kubenswrapper[5048]: I1213 06:49:57.607695 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Dec 13 06:49:59 crc kubenswrapper[5048]: I1213 06:49:59.370703 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Dec 13 06:49:59 crc kubenswrapper[5048]: I1213 06:49:59.371271 5048 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Dec 13 06:49:59 crc kubenswrapper[5048]: I1213 06:49:59.373163 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Dec 13 06:49:59 crc kubenswrapper[5048]: I1213 06:49:59.760394 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6b7b667979-k4rmf"
Dec 13 06:49:59 crc kubenswrapper[5048]: I1213 06:49:59.821534 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-pmzx8"]
Dec 13 06:49:59 crc kubenswrapper[5048]: I1213 06:49:59.821764 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-56df8fb6b7-pmzx8" podUID="6aaba4d3-75f7-4e64-83fb-95039921e50b" containerName="dnsmasq-dns" containerID="cri-o://4180f53c8df8d329aec89da7fe89a5ae17927f4b50bc304d8b7f4c2017c56471" gracePeriod=10
Dec 13 06:50:00 crc kubenswrapper[5048]: I1213 06:50:00.208129 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Dec 13 06:50:00 crc kubenswrapper[5048]: I1213 06:50:00.208252 5048 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Dec 13 06:50:00 crc kubenswrapper[5048]: I1213 06:50:00.635095 5048 generic.go:334] "Generic (PLEG): container finished" podID="6aaba4d3-75f7-4e64-83fb-95039921e50b" containerID="4180f53c8df8d329aec89da7fe89a5ae17927f4b50bc304d8b7f4c2017c56471" exitCode=0
Dec 13 06:50:00 crc kubenswrapper[5048]: I1213 06:50:00.635138 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-pmzx8" event={"ID":"6aaba4d3-75f7-4e64-83fb-95039921e50b","Type":"ContainerDied","Data":"4180f53c8df8d329aec89da7fe89a5ae17927f4b50bc304d8b7f4c2017c56471"}
Dec 13 06:50:00 crc kubenswrapper[5048]: I1213 06:50:00.750019 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Dec 13 06:50:01 crc kubenswrapper[5048]: I1213 06:50:01.071497 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-56df8fb6b7-pmzx8" podUID="6aaba4d3-75f7-4e64-83fb-95039921e50b" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.142:5353: connect: connection refused"
Dec 13 06:50:03 crc kubenswrapper[5048]: I1213 06:50:03.679548 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-jzn5h"
Dec 13 06:50:03 crc kubenswrapper[5048]: I1213 06:50:03.699136 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-jzn5h" event={"ID":"2d92d376-9d6c-404a-b1f5-a13a67276b6f","Type":"ContainerDied","Data":"f658179f1045b16f7ce59fa79acb9bb08ab549060321280463d8096227c93159"}
Dec 13 06:50:03 crc kubenswrapper[5048]: I1213 06:50:03.699186 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f658179f1045b16f7ce59fa79acb9bb08ab549060321280463d8096227c93159"
Dec 13 06:50:03 crc kubenswrapper[5048]: I1213 06:50:03.740972 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-pmzx8"
Dec 13 06:50:03 crc kubenswrapper[5048]: I1213 06:50:03.812310 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/2d92d376-9d6c-404a-b1f5-a13a67276b6f-fernet-keys\") pod \"2d92d376-9d6c-404a-b1f5-a13a67276b6f\" (UID: \"2d92d376-9d6c-404a-b1f5-a13a67276b6f\") "
Dec 13 06:50:03 crc kubenswrapper[5048]: I1213 06:50:03.812366 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d92d376-9d6c-404a-b1f5-a13a67276b6f-config-data\") pod \"2d92d376-9d6c-404a-b1f5-a13a67276b6f\" (UID: \"2d92d376-9d6c-404a-b1f5-a13a67276b6f\") "
Dec 13 06:50:03 crc kubenswrapper[5048]: I1213 06:50:03.812473 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/2d92d376-9d6c-404a-b1f5-a13a67276b6f-credential-keys\") pod \"2d92d376-9d6c-404a-b1f5-a13a67276b6f\" (UID: \"2d92d376-9d6c-404a-b1f5-a13a67276b6f\") "
Dec 13 06:50:03 crc kubenswrapper[5048]: I1213 06:50:03.812556 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kshmm\" (UniqueName: \"kubernetes.io/projected/2d92d376-9d6c-404a-b1f5-a13a67276b6f-kube-api-access-kshmm\") pod \"2d92d376-9d6c-404a-b1f5-a13a67276b6f\" (UID: \"2d92d376-9d6c-404a-b1f5-a13a67276b6f\") "
Dec 13 06:50:03 crc kubenswrapper[5048]: I1213 06:50:03.812620 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d92d376-9d6c-404a-b1f5-a13a67276b6f-combined-ca-bundle\") pod \"2d92d376-9d6c-404a-b1f5-a13a67276b6f\" (UID: \"2d92d376-9d6c-404a-b1f5-a13a67276b6f\") "
Dec 13 06:50:03 crc kubenswrapper[5048]: I1213 06:50:03.812703 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2d92d376-9d6c-404a-b1f5-a13a67276b6f-scripts\") pod \"2d92d376-9d6c-404a-b1f5-a13a67276b6f\" (UID: \"2d92d376-9d6c-404a-b1f5-a13a67276b6f\") "
Dec 13 06:50:03 crc kubenswrapper[5048]: I1213 06:50:03.826741 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2d92d376-9d6c-404a-b1f5-a13a67276b6f-scripts" (OuterVolumeSpecName: "scripts") pod "2d92d376-9d6c-404a-b1f5-a13a67276b6f" (UID: "2d92d376-9d6c-404a-b1f5-a13a67276b6f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:50:03 crc kubenswrapper[5048]: I1213 06:50:03.829586 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2d92d376-9d6c-404a-b1f5-a13a67276b6f-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "2d92d376-9d6c-404a-b1f5-a13a67276b6f" (UID: "2d92d376-9d6c-404a-b1f5-a13a67276b6f"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:50:03 crc kubenswrapper[5048]: I1213 06:50:03.830790 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2d92d376-9d6c-404a-b1f5-a13a67276b6f-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "2d92d376-9d6c-404a-b1f5-a13a67276b6f" (UID: "2d92d376-9d6c-404a-b1f5-a13a67276b6f"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:50:03 crc kubenswrapper[5048]: I1213 06:50:03.831600 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2d92d376-9d6c-404a-b1f5-a13a67276b6f-kube-api-access-kshmm" (OuterVolumeSpecName: "kube-api-access-kshmm") pod "2d92d376-9d6c-404a-b1f5-a13a67276b6f" (UID: "2d92d376-9d6c-404a-b1f5-a13a67276b6f"). InnerVolumeSpecName "kube-api-access-kshmm". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:50:03 crc kubenswrapper[5048]: I1213 06:50:03.860085 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2d92d376-9d6c-404a-b1f5-a13a67276b6f-config-data" (OuterVolumeSpecName: "config-data") pod "2d92d376-9d6c-404a-b1f5-a13a67276b6f" (UID: "2d92d376-9d6c-404a-b1f5-a13a67276b6f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:50:03 crc kubenswrapper[5048]: I1213 06:50:03.860613 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2d92d376-9d6c-404a-b1f5-a13a67276b6f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2d92d376-9d6c-404a-b1f5-a13a67276b6f" (UID: "2d92d376-9d6c-404a-b1f5-a13a67276b6f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:50:03 crc kubenswrapper[5048]: I1213 06:50:03.913862 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j6ttg\" (UniqueName: \"kubernetes.io/projected/6aaba4d3-75f7-4e64-83fb-95039921e50b-kube-api-access-j6ttg\") pod \"6aaba4d3-75f7-4e64-83fb-95039921e50b\" (UID: \"6aaba4d3-75f7-4e64-83fb-95039921e50b\") " Dec 13 06:50:03 crc kubenswrapper[5048]: I1213 06:50:03.913968 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6aaba4d3-75f7-4e64-83fb-95039921e50b-dns-swift-storage-0\") pod \"6aaba4d3-75f7-4e64-83fb-95039921e50b\" (UID: \"6aaba4d3-75f7-4e64-83fb-95039921e50b\") " Dec 13 06:50:03 crc kubenswrapper[5048]: I1213 06:50:03.913999 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6aaba4d3-75f7-4e64-83fb-95039921e50b-config\") pod \"6aaba4d3-75f7-4e64-83fb-95039921e50b\" (UID: \"6aaba4d3-75f7-4e64-83fb-95039921e50b\") " Dec 13 06:50:03 crc kubenswrapper[5048]: I1213 06:50:03.914031 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6aaba4d3-75f7-4e64-83fb-95039921e50b-ovsdbserver-sb\") pod \"6aaba4d3-75f7-4e64-83fb-95039921e50b\" (UID: \"6aaba4d3-75f7-4e64-83fb-95039921e50b\") " Dec 13 06:50:03 crc kubenswrapper[5048]: I1213 06:50:03.914111 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6aaba4d3-75f7-4e64-83fb-95039921e50b-ovsdbserver-nb\") pod \"6aaba4d3-75f7-4e64-83fb-95039921e50b\" (UID: \"6aaba4d3-75f7-4e64-83fb-95039921e50b\") " Dec 13 06:50:03 crc kubenswrapper[5048]: I1213 06:50:03.914160 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6aaba4d3-75f7-4e64-83fb-95039921e50b-dns-svc\") pod \"6aaba4d3-75f7-4e64-83fb-95039921e50b\" (UID: \"6aaba4d3-75f7-4e64-83fb-95039921e50b\") " Dec 13 06:50:03 crc kubenswrapper[5048]: 
Dec 13 06:50:03 crc kubenswrapper[5048]: I1213 06:50:03.914633 5048 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2d92d376-9d6c-404a-b1f5-a13a67276b6f-scripts\") on node \"crc\" DevicePath \"\""
Dec 13 06:50:03 crc kubenswrapper[5048]: I1213 06:50:03.914642 5048 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/2d92d376-9d6c-404a-b1f5-a13a67276b6f-fernet-keys\") on node \"crc\" DevicePath \"\""
Dec 13 06:50:03 crc kubenswrapper[5048]: I1213 06:50:03.914650 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d92d376-9d6c-404a-b1f5-a13a67276b6f-config-data\") on node \"crc\" DevicePath \"\""
Dec 13 06:50:03 crc kubenswrapper[5048]: I1213 06:50:03.914658 5048 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/2d92d376-9d6c-404a-b1f5-a13a67276b6f-credential-keys\") on node \"crc\" DevicePath \"\""
Dec 13 06:50:03 crc kubenswrapper[5048]: I1213 06:50:03.914666 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kshmm\" (UniqueName: \"kubernetes.io/projected/2d92d376-9d6c-404a-b1f5-a13a67276b6f-kube-api-access-kshmm\") on node \"crc\" DevicePath \"\""
Dec 13 06:50:03 crc kubenswrapper[5048]: I1213 06:50:03.929704 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6aaba4d3-75f7-4e64-83fb-95039921e50b-kube-api-access-j6ttg" (OuterVolumeSpecName: "kube-api-access-j6ttg") pod "6aaba4d3-75f7-4e64-83fb-95039921e50b" (UID: "6aaba4d3-75f7-4e64-83fb-95039921e50b"). InnerVolumeSpecName "kube-api-access-j6ttg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:50:03 crc kubenswrapper[5048]: I1213 06:50:03.978286 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6aaba4d3-75f7-4e64-83fb-95039921e50b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6aaba4d3-75f7-4e64-83fb-95039921e50b" (UID: "6aaba4d3-75f7-4e64-83fb-95039921e50b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 13 06:50:03 crc kubenswrapper[5048]: I1213 06:50:03.991037 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6aaba4d3-75f7-4e64-83fb-95039921e50b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "6aaba4d3-75f7-4e64-83fb-95039921e50b" (UID: "6aaba4d3-75f7-4e64-83fb-95039921e50b"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 13 06:50:03 crc kubenswrapper[5048]: I1213 06:50:03.993760 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6aaba4d3-75f7-4e64-83fb-95039921e50b-config" (OuterVolumeSpecName: "config") pod "6aaba4d3-75f7-4e64-83fb-95039921e50b" (UID: "6aaba4d3-75f7-4e64-83fb-95039921e50b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.009986 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6aaba4d3-75f7-4e64-83fb-95039921e50b-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "6aaba4d3-75f7-4e64-83fb-95039921e50b" (UID: "6aaba4d3-75f7-4e64-83fb-95039921e50b"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.011854 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6aaba4d3-75f7-4e64-83fb-95039921e50b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "6aaba4d3-75f7-4e64-83fb-95039921e50b" (UID: "6aaba4d3-75f7-4e64-83fb-95039921e50b"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.016322 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j6ttg\" (UniqueName: \"kubernetes.io/projected/6aaba4d3-75f7-4e64-83fb-95039921e50b-kube-api-access-j6ttg\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.016360 5048 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6aaba4d3-75f7-4e64-83fb-95039921e50b-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.016371 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6aaba4d3-75f7-4e64-83fb-95039921e50b-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.016382 5048 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6aaba4d3-75f7-4e64-83fb-95039921e50b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.016395 5048 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6aaba4d3-75f7-4e64-83fb-95039921e50b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.016405 5048 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6aaba4d3-75f7-4e64-83fb-95039921e50b-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.726144 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-pmzx8" event={"ID":"6aaba4d3-75f7-4e64-83fb-95039921e50b","Type":"ContainerDied","Data":"b74b82ddb076f848159be4b97c3bdc57054725ba9160ffd1a2b686a8fc9874df"} Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.726204 5048 scope.go:117] "RemoveContainer" containerID="4180f53c8df8d329aec89da7fe89a5ae17927f4b50bc304d8b7f4c2017c56471" Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.726314 5048 util.go:48] "No ready sandbox for pod can be found. 
Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.733171 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-pzgrc" event={"ID":"af6830b6-96f2-487f-ba02-93c7f01d0ceb","Type":"ContainerStarted","Data":"4e9ffef4eb8dff919102ef5693783b933d9927705c70a2fa4abdd604bc7a3c0f"}
Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.738896 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b","Type":"ContainerStarted","Data":"a018e2fc6d16a78d973ff5dbf2bbd7e1be2eb4ee7c9c05b0c8b477d00a261cff"}
Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.743344 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7b9cc68dcb-22pkt" event={"ID":"fb3e7c2a-93fb-4bf8-8447-6d4be22a0760","Type":"ContainerStarted","Data":"fc9d6cc79197efc247a9860dcece6b565ca592799ff1a998f0d81138deefe3d9"}
Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.743389 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-jzn5h"
Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.743699 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-7b9cc68dcb-22pkt"
Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.755843 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-pmzx8"]
Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.762371 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-pmzx8"]
Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.768976 5048 scope.go:117] "RemoveContainer" containerID="67d8f9858481513d0a5bc6370ee0ed3b40ea58ab3bdd16cd4b01391f5143e77a"
Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.785618 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-pzgrc" podStartSLOduration=3.980690735 podStartE2EDuration="49.784851931s" podCreationTimestamp="2025-12-13 06:49:15 +0000 UTC" firstStartedPulling="2025-12-13 06:49:17.642593143 +0000 UTC m=+1191.509187724" lastFinishedPulling="2025-12-13 06:50:03.446754339 +0000 UTC m=+1237.313348920" observedRunningTime="2025-12-13 06:50:04.782837387 +0000 UTC m=+1238.649431968" watchObservedRunningTime="2025-12-13 06:50:04.784851931 +0000 UTC m=+1238.651446512"
Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.824641 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-5c89574bc9-gw9cl"]
Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.825004 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-7b9cc68dcb-22pkt" podStartSLOduration=10.824982968 podStartE2EDuration="10.824982968s" podCreationTimestamp="2025-12-13 06:49:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:50:04.804856008 +0000 UTC m=+1238.671450589" watchObservedRunningTime="2025-12-13 06:50:04.824982968 +0000 UTC m=+1238.691577559"
Dec 13 06:50:04 crc kubenswrapper[5048]: E1213 06:50:04.825126 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6aaba4d3-75f7-4e64-83fb-95039921e50b" containerName="dnsmasq-dns"
Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.825141 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="6aaba4d3-75f7-4e64-83fb-95039921e50b" containerName="dnsmasq-dns"
podUID="6aaba4d3-75f7-4e64-83fb-95039921e50b" containerName="dnsmasq-dns" Dec 13 06:50:04 crc kubenswrapper[5048]: E1213 06:50:04.825157 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6aaba4d3-75f7-4e64-83fb-95039921e50b" containerName="init" Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.825163 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="6aaba4d3-75f7-4e64-83fb-95039921e50b" containerName="init" Dec 13 06:50:04 crc kubenswrapper[5048]: E1213 06:50:04.825175 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d92d376-9d6c-404a-b1f5-a13a67276b6f" containerName="keystone-bootstrap" Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.825182 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d92d376-9d6c-404a-b1f5-a13a67276b6f" containerName="keystone-bootstrap" Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.825352 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="6aaba4d3-75f7-4e64-83fb-95039921e50b" containerName="dnsmasq-dns" Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.825372 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d92d376-9d6c-404a-b1f5-a13a67276b6f" containerName="keystone-bootstrap" Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.827313 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-5c89574bc9-gw9cl" Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.839077 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.839261 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-scw59" Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.839375 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.839539 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.839705 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.839877 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.856592 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-5c89574bc9-gw9cl"] Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.938124 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d339d78d-798f-4147-85f3-87e7a05515dc-scripts\") pod \"keystone-5c89574bc9-gw9cl\" (UID: \"d339d78d-798f-4147-85f3-87e7a05515dc\") " pod="openstack/keystone-5c89574bc9-gw9cl" Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.938194 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d339d78d-798f-4147-85f3-87e7a05515dc-combined-ca-bundle\") pod \"keystone-5c89574bc9-gw9cl\" (UID: \"d339d78d-798f-4147-85f3-87e7a05515dc\") " pod="openstack/keystone-5c89574bc9-gw9cl" Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.938224 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"credential-keys\" (UniqueName: \"kubernetes.io/secret/d339d78d-798f-4147-85f3-87e7a05515dc-credential-keys\") pod \"keystone-5c89574bc9-gw9cl\" (UID: \"d339d78d-798f-4147-85f3-87e7a05515dc\") " pod="openstack/keystone-5c89574bc9-gw9cl" Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.938307 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d339d78d-798f-4147-85f3-87e7a05515dc-config-data\") pod \"keystone-5c89574bc9-gw9cl\" (UID: \"d339d78d-798f-4147-85f3-87e7a05515dc\") " pod="openstack/keystone-5c89574bc9-gw9cl" Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.938326 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d339d78d-798f-4147-85f3-87e7a05515dc-fernet-keys\") pod \"keystone-5c89574bc9-gw9cl\" (UID: \"d339d78d-798f-4147-85f3-87e7a05515dc\") " pod="openstack/keystone-5c89574bc9-gw9cl" Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.938346 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l8c6d\" (UniqueName: \"kubernetes.io/projected/d339d78d-798f-4147-85f3-87e7a05515dc-kube-api-access-l8c6d\") pod \"keystone-5c89574bc9-gw9cl\" (UID: \"d339d78d-798f-4147-85f3-87e7a05515dc\") " pod="openstack/keystone-5c89574bc9-gw9cl" Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.938363 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d339d78d-798f-4147-85f3-87e7a05515dc-internal-tls-certs\") pod \"keystone-5c89574bc9-gw9cl\" (UID: \"d339d78d-798f-4147-85f3-87e7a05515dc\") " pod="openstack/keystone-5c89574bc9-gw9cl" Dec 13 06:50:04 crc kubenswrapper[5048]: I1213 06:50:04.938378 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d339d78d-798f-4147-85f3-87e7a05515dc-public-tls-certs\") pod \"keystone-5c89574bc9-gw9cl\" (UID: \"d339d78d-798f-4147-85f3-87e7a05515dc\") " pod="openstack/keystone-5c89574bc9-gw9cl" Dec 13 06:50:05 crc kubenswrapper[5048]: I1213 06:50:05.039944 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d339d78d-798f-4147-85f3-87e7a05515dc-combined-ca-bundle\") pod \"keystone-5c89574bc9-gw9cl\" (UID: \"d339d78d-798f-4147-85f3-87e7a05515dc\") " pod="openstack/keystone-5c89574bc9-gw9cl" Dec 13 06:50:05 crc kubenswrapper[5048]: I1213 06:50:05.040009 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d339d78d-798f-4147-85f3-87e7a05515dc-credential-keys\") pod \"keystone-5c89574bc9-gw9cl\" (UID: \"d339d78d-798f-4147-85f3-87e7a05515dc\") " pod="openstack/keystone-5c89574bc9-gw9cl" Dec 13 06:50:05 crc kubenswrapper[5048]: I1213 06:50:05.040081 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d339d78d-798f-4147-85f3-87e7a05515dc-config-data\") pod \"keystone-5c89574bc9-gw9cl\" (UID: \"d339d78d-798f-4147-85f3-87e7a05515dc\") " pod="openstack/keystone-5c89574bc9-gw9cl" Dec 13 06:50:05 crc kubenswrapper[5048]: I1213 06:50:05.040097 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" 
(UniqueName: \"kubernetes.io/secret/d339d78d-798f-4147-85f3-87e7a05515dc-fernet-keys\") pod \"keystone-5c89574bc9-gw9cl\" (UID: \"d339d78d-798f-4147-85f3-87e7a05515dc\") " pod="openstack/keystone-5c89574bc9-gw9cl" Dec 13 06:50:05 crc kubenswrapper[5048]: I1213 06:50:05.040121 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l8c6d\" (UniqueName: \"kubernetes.io/projected/d339d78d-798f-4147-85f3-87e7a05515dc-kube-api-access-l8c6d\") pod \"keystone-5c89574bc9-gw9cl\" (UID: \"d339d78d-798f-4147-85f3-87e7a05515dc\") " pod="openstack/keystone-5c89574bc9-gw9cl" Dec 13 06:50:05 crc kubenswrapper[5048]: I1213 06:50:05.040138 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d339d78d-798f-4147-85f3-87e7a05515dc-internal-tls-certs\") pod \"keystone-5c89574bc9-gw9cl\" (UID: \"d339d78d-798f-4147-85f3-87e7a05515dc\") " pod="openstack/keystone-5c89574bc9-gw9cl" Dec 13 06:50:05 crc kubenswrapper[5048]: I1213 06:50:05.040151 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d339d78d-798f-4147-85f3-87e7a05515dc-public-tls-certs\") pod \"keystone-5c89574bc9-gw9cl\" (UID: \"d339d78d-798f-4147-85f3-87e7a05515dc\") " pod="openstack/keystone-5c89574bc9-gw9cl" Dec 13 06:50:05 crc kubenswrapper[5048]: I1213 06:50:05.040183 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d339d78d-798f-4147-85f3-87e7a05515dc-scripts\") pod \"keystone-5c89574bc9-gw9cl\" (UID: \"d339d78d-798f-4147-85f3-87e7a05515dc\") " pod="openstack/keystone-5c89574bc9-gw9cl" Dec 13 06:50:05 crc kubenswrapper[5048]: I1213 06:50:05.056198 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d339d78d-798f-4147-85f3-87e7a05515dc-internal-tls-certs\") pod \"keystone-5c89574bc9-gw9cl\" (UID: \"d339d78d-798f-4147-85f3-87e7a05515dc\") " pod="openstack/keystone-5c89574bc9-gw9cl" Dec 13 06:50:05 crc kubenswrapper[5048]: I1213 06:50:05.058754 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d339d78d-798f-4147-85f3-87e7a05515dc-credential-keys\") pod \"keystone-5c89574bc9-gw9cl\" (UID: \"d339d78d-798f-4147-85f3-87e7a05515dc\") " pod="openstack/keystone-5c89574bc9-gw9cl" Dec 13 06:50:05 crc kubenswrapper[5048]: I1213 06:50:05.058873 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d339d78d-798f-4147-85f3-87e7a05515dc-scripts\") pod \"keystone-5c89574bc9-gw9cl\" (UID: \"d339d78d-798f-4147-85f3-87e7a05515dc\") " pod="openstack/keystone-5c89574bc9-gw9cl" Dec 13 06:50:05 crc kubenswrapper[5048]: I1213 06:50:05.060598 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d339d78d-798f-4147-85f3-87e7a05515dc-combined-ca-bundle\") pod \"keystone-5c89574bc9-gw9cl\" (UID: \"d339d78d-798f-4147-85f3-87e7a05515dc\") " pod="openstack/keystone-5c89574bc9-gw9cl" Dec 13 06:50:05 crc kubenswrapper[5048]: I1213 06:50:05.074711 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d339d78d-798f-4147-85f3-87e7a05515dc-config-data\") pod \"keystone-5c89574bc9-gw9cl\" (UID: 
\"d339d78d-798f-4147-85f3-87e7a05515dc\") " pod="openstack/keystone-5c89574bc9-gw9cl" Dec 13 06:50:05 crc kubenswrapper[5048]: I1213 06:50:05.078226 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l8c6d\" (UniqueName: \"kubernetes.io/projected/d339d78d-798f-4147-85f3-87e7a05515dc-kube-api-access-l8c6d\") pod \"keystone-5c89574bc9-gw9cl\" (UID: \"d339d78d-798f-4147-85f3-87e7a05515dc\") " pod="openstack/keystone-5c89574bc9-gw9cl" Dec 13 06:50:05 crc kubenswrapper[5048]: I1213 06:50:05.083071 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d339d78d-798f-4147-85f3-87e7a05515dc-public-tls-certs\") pod \"keystone-5c89574bc9-gw9cl\" (UID: \"d339d78d-798f-4147-85f3-87e7a05515dc\") " pod="openstack/keystone-5c89574bc9-gw9cl" Dec 13 06:50:05 crc kubenswrapper[5048]: I1213 06:50:05.083361 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d339d78d-798f-4147-85f3-87e7a05515dc-fernet-keys\") pod \"keystone-5c89574bc9-gw9cl\" (UID: \"d339d78d-798f-4147-85f3-87e7a05515dc\") " pod="openstack/keystone-5c89574bc9-gw9cl" Dec 13 06:50:05 crc kubenswrapper[5048]: I1213 06:50:05.167425 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-5c89574bc9-gw9cl" Dec 13 06:50:05 crc kubenswrapper[5048]: I1213 06:50:05.685497 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-5c89574bc9-gw9cl"] Dec 13 06:50:05 crc kubenswrapper[5048]: I1213 06:50:05.758395 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5c89574bc9-gw9cl" event={"ID":"d339d78d-798f-4147-85f3-87e7a05515dc","Type":"ContainerStarted","Data":"1a39b8c8200e1a65c8a23e693e942e613307e8cbf70dc941d3ff4d71e0a20c2c"} Dec 13 06:50:05 crc kubenswrapper[5048]: I1213 06:50:05.763534 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-9xjhb" event={"ID":"8a4a2da1-cc7f-474f-baf7-16c352bd0708","Type":"ContainerStarted","Data":"740fb7bacfa2a3b3b6ad58b4b499b2ec58dc979e363111b4788f11f11437cdd0"} Dec 13 06:50:05 crc kubenswrapper[5048]: I1213 06:50:05.763756 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-7b9cc68dcb-22pkt" Dec 13 06:50:05 crc kubenswrapper[5048]: I1213 06:50:05.797907 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-9xjhb" podStartSLOduration=3.72467558 podStartE2EDuration="51.797887541s" podCreationTimestamp="2025-12-13 06:49:14 +0000 UTC" firstStartedPulling="2025-12-13 06:49:15.970994893 +0000 UTC m=+1189.837589474" lastFinishedPulling="2025-12-13 06:50:04.044206854 +0000 UTC m=+1237.910801435" observedRunningTime="2025-12-13 06:50:05.792631778 +0000 UTC m=+1239.659226369" watchObservedRunningTime="2025-12-13 06:50:05.797887541 +0000 UTC m=+1239.664482112" Dec 13 06:50:06 crc kubenswrapper[5048]: I1213 06:50:06.446944 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-7b9cc68dcb-22pkt" Dec 13 06:50:06 crc kubenswrapper[5048]: I1213 06:50:06.582026 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6aaba4d3-75f7-4e64-83fb-95039921e50b" path="/var/lib/kubelet/pods/6aaba4d3-75f7-4e64-83fb-95039921e50b/volumes" Dec 13 06:50:06 crc kubenswrapper[5048]: I1213 06:50:06.780503 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/keystone-5c89574bc9-gw9cl" event={"ID":"d339d78d-798f-4147-85f3-87e7a05515dc","Type":"ContainerStarted","Data":"09fbf789e5a2d3217f985fafaff9b0aca75f076b3ee087c85e037a418082d1f7"} Dec 13 06:50:06 crc kubenswrapper[5048]: I1213 06:50:06.781515 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-5c89574bc9-gw9cl" Dec 13 06:50:06 crc kubenswrapper[5048]: I1213 06:50:06.789821 5048 generic.go:334] "Generic (PLEG): container finished" podID="af6830b6-96f2-487f-ba02-93c7f01d0ceb" containerID="4e9ffef4eb8dff919102ef5693783b933d9927705c70a2fa4abdd604bc7a3c0f" exitCode=0 Dec 13 06:50:06 crc kubenswrapper[5048]: I1213 06:50:06.792525 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-pzgrc" event={"ID":"af6830b6-96f2-487f-ba02-93c7f01d0ceb","Type":"ContainerDied","Data":"4e9ffef4eb8dff919102ef5693783b933d9927705c70a2fa4abdd604bc7a3c0f"} Dec 13 06:50:06 crc kubenswrapper[5048]: I1213 06:50:06.794006 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5fdc45567b-kg45h" podUID="3c13197f-14c3-45a9-ba9c-bc89b80d6169" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.145:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.145:8443: connect: connection refused" Dec 13 06:50:06 crc kubenswrapper[5048]: I1213 06:50:06.804935 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-5c89574bc9-gw9cl" podStartSLOduration=2.8049188579999997 podStartE2EDuration="2.804918858s" podCreationTimestamp="2025-12-13 06:50:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:50:06.802647866 +0000 UTC m=+1240.669242467" watchObservedRunningTime="2025-12-13 06:50:06.804918858 +0000 UTC m=+1240.671513439" Dec 13 06:50:06 crc kubenswrapper[5048]: I1213 06:50:06.956305 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-657fc95f76-vznd4" podUID="1a49463c-d974-4631-b6ef-3f88d734ac2d" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.146:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.146:8443: connect: connection refused" Dec 13 06:50:08 crc kubenswrapper[5048]: I1213 06:50:08.171039 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-pzgrc" Dec 13 06:50:08 crc kubenswrapper[5048]: I1213 06:50:08.331087 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/af6830b6-96f2-487f-ba02-93c7f01d0ceb-db-sync-config-data\") pod \"af6830b6-96f2-487f-ba02-93c7f01d0ceb\" (UID: \"af6830b6-96f2-487f-ba02-93c7f01d0ceb\") " Dec 13 06:50:08 crc kubenswrapper[5048]: I1213 06:50:08.331195 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af6830b6-96f2-487f-ba02-93c7f01d0ceb-combined-ca-bundle\") pod \"af6830b6-96f2-487f-ba02-93c7f01d0ceb\" (UID: \"af6830b6-96f2-487f-ba02-93c7f01d0ceb\") " Dec 13 06:50:08 crc kubenswrapper[5048]: I1213 06:50:08.331230 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-677wd\" (UniqueName: \"kubernetes.io/projected/af6830b6-96f2-487f-ba02-93c7f01d0ceb-kube-api-access-677wd\") pod \"af6830b6-96f2-487f-ba02-93c7f01d0ceb\" (UID: \"af6830b6-96f2-487f-ba02-93c7f01d0ceb\") " Dec 13 06:50:08 crc kubenswrapper[5048]: I1213 06:50:08.338643 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/af6830b6-96f2-487f-ba02-93c7f01d0ceb-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "af6830b6-96f2-487f-ba02-93c7f01d0ceb" (UID: "af6830b6-96f2-487f-ba02-93c7f01d0ceb"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:50:08 crc kubenswrapper[5048]: I1213 06:50:08.341772 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af6830b6-96f2-487f-ba02-93c7f01d0ceb-kube-api-access-677wd" (OuterVolumeSpecName: "kube-api-access-677wd") pod "af6830b6-96f2-487f-ba02-93c7f01d0ceb" (UID: "af6830b6-96f2-487f-ba02-93c7f01d0ceb"). InnerVolumeSpecName "kube-api-access-677wd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:50:08 crc kubenswrapper[5048]: I1213 06:50:08.391612 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/af6830b6-96f2-487f-ba02-93c7f01d0ceb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "af6830b6-96f2-487f-ba02-93c7f01d0ceb" (UID: "af6830b6-96f2-487f-ba02-93c7f01d0ceb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:50:08 crc kubenswrapper[5048]: I1213 06:50:08.433496 5048 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af6830b6-96f2-487f-ba02-93c7f01d0ceb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:08 crc kubenswrapper[5048]: I1213 06:50:08.433555 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-677wd\" (UniqueName: \"kubernetes.io/projected/af6830b6-96f2-487f-ba02-93c7f01d0ceb-kube-api-access-677wd\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:08 crc kubenswrapper[5048]: I1213 06:50:08.433572 5048 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/af6830b6-96f2-487f-ba02-93c7f01d0ceb-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:08 crc kubenswrapper[5048]: I1213 06:50:08.813346 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-pzgrc" Dec 13 06:50:08 crc kubenswrapper[5048]: I1213 06:50:08.814432 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-pzgrc" event={"ID":"af6830b6-96f2-487f-ba02-93c7f01d0ceb","Type":"ContainerDied","Data":"8386a0287dd9ccbecb46b778d12bc7a3212b9a585eb7b8a15e454a0573bd7da3"} Dec 13 06:50:08 crc kubenswrapper[5048]: I1213 06:50:08.816579 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8386a0287dd9ccbecb46b778d12bc7a3212b9a585eb7b8a15e454a0573bd7da3" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.075495 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-7888dc9665-xrmff"] Dec 13 06:50:09 crc kubenswrapper[5048]: E1213 06:50:09.075845 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af6830b6-96f2-487f-ba02-93c7f01d0ceb" containerName="barbican-db-sync" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.075862 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="af6830b6-96f2-487f-ba02-93c7f01d0ceb" containerName="barbican-db-sync" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.076039 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="af6830b6-96f2-487f-ba02-93c7f01d0ceb" containerName="barbican-db-sync" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.086418 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-7888dc9665-xrmff" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.094909 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.095162 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.105524 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-6lzvh" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.117362 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-7dd9cf6646-xj92w"] Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.119262 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-7dd9cf6646-xj92w" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.135149 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.141387 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-7888dc9665-xrmff"] Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.177639 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-7dd9cf6646-xj92w"] Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.234738 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-f69kg"] Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.241316 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-f69kg" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.254202 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff1df1f6-e2a6-4e70-b352-f3b15b9255d7-logs\") pod \"barbican-keystone-listener-7dd9cf6646-xj92w\" (UID: \"ff1df1f6-e2a6-4e70-b352-f3b15b9255d7\") " pod="openstack/barbican-keystone-listener-7dd9cf6646-xj92w" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.254319 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ec726365-6964-4e11-942d-d57482573f01-logs\") pod \"barbican-worker-7888dc9665-xrmff\" (UID: \"ec726365-6964-4e11-942d-d57482573f01\") " pod="openstack/barbican-worker-7888dc9665-xrmff" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.254360 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ff1df1f6-e2a6-4e70-b352-f3b15b9255d7-config-data-custom\") pod \"barbican-keystone-listener-7dd9cf6646-xj92w\" (UID: \"ff1df1f6-e2a6-4e70-b352-f3b15b9255d7\") " pod="openstack/barbican-keystone-listener-7dd9cf6646-xj92w" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.254399 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec726365-6964-4e11-942d-d57482573f01-config-data\") pod \"barbican-worker-7888dc9665-xrmff\" (UID: \"ec726365-6964-4e11-942d-d57482573f01\") " pod="openstack/barbican-worker-7888dc9665-xrmff" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.254578 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff1df1f6-e2a6-4e70-b352-f3b15b9255d7-config-data\") pod \"barbican-keystone-listener-7dd9cf6646-xj92w\" (UID: \"ff1df1f6-e2a6-4e70-b352-f3b15b9255d7\") " pod="openstack/barbican-keystone-listener-7dd9cf6646-xj92w" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.254643 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-52pcr\" (UniqueName: \"kubernetes.io/projected/ff1df1f6-e2a6-4e70-b352-f3b15b9255d7-kube-api-access-52pcr\") pod \"barbican-keystone-listener-7dd9cf6646-xj92w\" (UID: \"ff1df1f6-e2a6-4e70-b352-f3b15b9255d7\") " pod="openstack/barbican-keystone-listener-7dd9cf6646-xj92w" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.254702 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ec726365-6964-4e11-942d-d57482573f01-config-data-custom\") pod \"barbican-worker-7888dc9665-xrmff\" (UID: \"ec726365-6964-4e11-942d-d57482573f01\") " pod="openstack/barbican-worker-7888dc9665-xrmff" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.254740 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec726365-6964-4e11-942d-d57482573f01-combined-ca-bundle\") pod \"barbican-worker-7888dc9665-xrmff\" (UID: \"ec726365-6964-4e11-942d-d57482573f01\") " pod="openstack/barbican-worker-7888dc9665-xrmff" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.254785 5048 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bmp9t\" (UniqueName: \"kubernetes.io/projected/ec726365-6964-4e11-942d-d57482573f01-kube-api-access-bmp9t\") pod \"barbican-worker-7888dc9665-xrmff\" (UID: \"ec726365-6964-4e11-942d-d57482573f01\") " pod="openstack/barbican-worker-7888dc9665-xrmff" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.254834 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff1df1f6-e2a6-4e70-b352-f3b15b9255d7-combined-ca-bundle\") pod \"barbican-keystone-listener-7dd9cf6646-xj92w\" (UID: \"ff1df1f6-e2a6-4e70-b352-f3b15b9255d7\") " pod="openstack/barbican-keystone-listener-7dd9cf6646-xj92w" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.259827 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-f69kg"] Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.298789 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-648c8d65dd-tpz2s"] Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.300400 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-648c8d65dd-tpz2s" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.310931 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.343559 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-648c8d65dd-tpz2s"] Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.359688 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9n9tc\" (UniqueName: \"kubernetes.io/projected/82ebb804-a23f-4cb3-8f2b-2cfda6815108-kube-api-access-9n9tc\") pod \"dnsmasq-dns-848cf88cfc-f69kg\" (UID: \"82ebb804-a23f-4cb3-8f2b-2cfda6815108\") " pod="openstack/dnsmasq-dns-848cf88cfc-f69kg" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.359762 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-52pcr\" (UniqueName: \"kubernetes.io/projected/ff1df1f6-e2a6-4e70-b352-f3b15b9255d7-kube-api-access-52pcr\") pod \"barbican-keystone-listener-7dd9cf6646-xj92w\" (UID: \"ff1df1f6-e2a6-4e70-b352-f3b15b9255d7\") " pod="openstack/barbican-keystone-listener-7dd9cf6646-xj92w" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.359829 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/82ebb804-a23f-4cb3-8f2b-2cfda6815108-dns-svc\") pod \"dnsmasq-dns-848cf88cfc-f69kg\" (UID: \"82ebb804-a23f-4cb3-8f2b-2cfda6815108\") " pod="openstack/dnsmasq-dns-848cf88cfc-f69kg" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.359871 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ec726365-6964-4e11-942d-d57482573f01-config-data-custom\") pod \"barbican-worker-7888dc9665-xrmff\" (UID: \"ec726365-6964-4e11-942d-d57482573f01\") " pod="openstack/barbican-worker-7888dc9665-xrmff" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.359912 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec726365-6964-4e11-942d-d57482573f01-combined-ca-bundle\") pod 
\"barbican-worker-7888dc9665-xrmff\" (UID: \"ec726365-6964-4e11-942d-d57482573f01\") " pod="openstack/barbican-worker-7888dc9665-xrmff" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.359949 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bmp9t\" (UniqueName: \"kubernetes.io/projected/ec726365-6964-4e11-942d-d57482573f01-kube-api-access-bmp9t\") pod \"barbican-worker-7888dc9665-xrmff\" (UID: \"ec726365-6964-4e11-942d-d57482573f01\") " pod="openstack/barbican-worker-7888dc9665-xrmff" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.359987 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82ebb804-a23f-4cb3-8f2b-2cfda6815108-config\") pod \"dnsmasq-dns-848cf88cfc-f69kg\" (UID: \"82ebb804-a23f-4cb3-8f2b-2cfda6815108\") " pod="openstack/dnsmasq-dns-848cf88cfc-f69kg" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.360039 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff1df1f6-e2a6-4e70-b352-f3b15b9255d7-combined-ca-bundle\") pod \"barbican-keystone-listener-7dd9cf6646-xj92w\" (UID: \"ff1df1f6-e2a6-4e70-b352-f3b15b9255d7\") " pod="openstack/barbican-keystone-listener-7dd9cf6646-xj92w" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.360122 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff1df1f6-e2a6-4e70-b352-f3b15b9255d7-logs\") pod \"barbican-keystone-listener-7dd9cf6646-xj92w\" (UID: \"ff1df1f6-e2a6-4e70-b352-f3b15b9255d7\") " pod="openstack/barbican-keystone-listener-7dd9cf6646-xj92w" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.360164 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/82ebb804-a23f-4cb3-8f2b-2cfda6815108-dns-swift-storage-0\") pod \"dnsmasq-dns-848cf88cfc-f69kg\" (UID: \"82ebb804-a23f-4cb3-8f2b-2cfda6815108\") " pod="openstack/dnsmasq-dns-848cf88cfc-f69kg" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.360211 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ec726365-6964-4e11-942d-d57482573f01-logs\") pod \"barbican-worker-7888dc9665-xrmff\" (UID: \"ec726365-6964-4e11-942d-d57482573f01\") " pod="openstack/barbican-worker-7888dc9665-xrmff" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.360252 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ff1df1f6-e2a6-4e70-b352-f3b15b9255d7-config-data-custom\") pod \"barbican-keystone-listener-7dd9cf6646-xj92w\" (UID: \"ff1df1f6-e2a6-4e70-b352-f3b15b9255d7\") " pod="openstack/barbican-keystone-listener-7dd9cf6646-xj92w" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.360301 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec726365-6964-4e11-942d-d57482573f01-config-data\") pod \"barbican-worker-7888dc9665-xrmff\" (UID: \"ec726365-6964-4e11-942d-d57482573f01\") " pod="openstack/barbican-worker-7888dc9665-xrmff" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.360415 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" 
(UniqueName: \"kubernetes.io/configmap/82ebb804-a23f-4cb3-8f2b-2cfda6815108-ovsdbserver-sb\") pod \"dnsmasq-dns-848cf88cfc-f69kg\" (UID: \"82ebb804-a23f-4cb3-8f2b-2cfda6815108\") " pod="openstack/dnsmasq-dns-848cf88cfc-f69kg" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.360485 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff1df1f6-e2a6-4e70-b352-f3b15b9255d7-config-data\") pod \"barbican-keystone-listener-7dd9cf6646-xj92w\" (UID: \"ff1df1f6-e2a6-4e70-b352-f3b15b9255d7\") " pod="openstack/barbican-keystone-listener-7dd9cf6646-xj92w" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.360525 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/82ebb804-a23f-4cb3-8f2b-2cfda6815108-ovsdbserver-nb\") pod \"dnsmasq-dns-848cf88cfc-f69kg\" (UID: \"82ebb804-a23f-4cb3-8f2b-2cfda6815108\") " pod="openstack/dnsmasq-dns-848cf88cfc-f69kg" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.365394 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ec726365-6964-4e11-942d-d57482573f01-logs\") pod \"barbican-worker-7888dc9665-xrmff\" (UID: \"ec726365-6964-4e11-942d-d57482573f01\") " pod="openstack/barbican-worker-7888dc9665-xrmff" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.366013 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec726365-6964-4e11-942d-d57482573f01-combined-ca-bundle\") pod \"barbican-worker-7888dc9665-xrmff\" (UID: \"ec726365-6964-4e11-942d-d57482573f01\") " pod="openstack/barbican-worker-7888dc9665-xrmff" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.375376 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff1df1f6-e2a6-4e70-b352-f3b15b9255d7-combined-ca-bundle\") pod \"barbican-keystone-listener-7dd9cf6646-xj92w\" (UID: \"ff1df1f6-e2a6-4e70-b352-f3b15b9255d7\") " pod="openstack/barbican-keystone-listener-7dd9cf6646-xj92w" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.375484 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ff1df1f6-e2a6-4e70-b352-f3b15b9255d7-config-data-custom\") pod \"barbican-keystone-listener-7dd9cf6646-xj92w\" (UID: \"ff1df1f6-e2a6-4e70-b352-f3b15b9255d7\") " pod="openstack/barbican-keystone-listener-7dd9cf6646-xj92w" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.377033 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec726365-6964-4e11-942d-d57482573f01-config-data\") pod \"barbican-worker-7888dc9665-xrmff\" (UID: \"ec726365-6964-4e11-942d-d57482573f01\") " pod="openstack/barbican-worker-7888dc9665-xrmff" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.388237 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff1df1f6-e2a6-4e70-b352-f3b15b9255d7-logs\") pod \"barbican-keystone-listener-7dd9cf6646-xj92w\" (UID: \"ff1df1f6-e2a6-4e70-b352-f3b15b9255d7\") " pod="openstack/barbican-keystone-listener-7dd9cf6646-xj92w" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.389635 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ec726365-6964-4e11-942d-d57482573f01-config-data-custom\") pod \"barbican-worker-7888dc9665-xrmff\" (UID: \"ec726365-6964-4e11-942d-d57482573f01\") " pod="openstack/barbican-worker-7888dc9665-xrmff" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.403936 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-52pcr\" (UniqueName: \"kubernetes.io/projected/ff1df1f6-e2a6-4e70-b352-f3b15b9255d7-kube-api-access-52pcr\") pod \"barbican-keystone-listener-7dd9cf6646-xj92w\" (UID: \"ff1df1f6-e2a6-4e70-b352-f3b15b9255d7\") " pod="openstack/barbican-keystone-listener-7dd9cf6646-xj92w" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.426137 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bmp9t\" (UniqueName: \"kubernetes.io/projected/ec726365-6964-4e11-942d-d57482573f01-kube-api-access-bmp9t\") pod \"barbican-worker-7888dc9665-xrmff\" (UID: \"ec726365-6964-4e11-942d-d57482573f01\") " pod="openstack/barbican-worker-7888dc9665-xrmff" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.446259 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-7888dc9665-xrmff" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.451326 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff1df1f6-e2a6-4e70-b352-f3b15b9255d7-config-data\") pod \"barbican-keystone-listener-7dd9cf6646-xj92w\" (UID: \"ff1df1f6-e2a6-4e70-b352-f3b15b9255d7\") " pod="openstack/barbican-keystone-listener-7dd9cf6646-xj92w" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.461637 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/82ebb804-a23f-4cb3-8f2b-2cfda6815108-dns-svc\") pod \"dnsmasq-dns-848cf88cfc-f69kg\" (UID: \"82ebb804-a23f-4cb3-8f2b-2cfda6815108\") " pod="openstack/dnsmasq-dns-848cf88cfc-f69kg" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.461696 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82ebb804-a23f-4cb3-8f2b-2cfda6815108-config\") pod \"dnsmasq-dns-848cf88cfc-f69kg\" (UID: \"82ebb804-a23f-4cb3-8f2b-2cfda6815108\") " pod="openstack/dnsmasq-dns-848cf88cfc-f69kg" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.461749 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0fb59ccf-cf8a-47f3-ad52-283c568bfb44-logs\") pod \"barbican-api-648c8d65dd-tpz2s\" (UID: \"0fb59ccf-cf8a-47f3-ad52-283c568bfb44\") " pod="openstack/barbican-api-648c8d65dd-tpz2s" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.461788 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/82ebb804-a23f-4cb3-8f2b-2cfda6815108-dns-swift-storage-0\") pod \"dnsmasq-dns-848cf88cfc-f69kg\" (UID: \"82ebb804-a23f-4cb3-8f2b-2cfda6815108\") " pod="openstack/dnsmasq-dns-848cf88cfc-f69kg" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.461811 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-snlfn\" (UniqueName: \"kubernetes.io/projected/0fb59ccf-cf8a-47f3-ad52-283c568bfb44-kube-api-access-snlfn\") pod \"barbican-api-648c8d65dd-tpz2s\" (UID: 
\"0fb59ccf-cf8a-47f3-ad52-283c568bfb44\") " pod="openstack/barbican-api-648c8d65dd-tpz2s" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.461840 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0fb59ccf-cf8a-47f3-ad52-283c568bfb44-config-data-custom\") pod \"barbican-api-648c8d65dd-tpz2s\" (UID: \"0fb59ccf-cf8a-47f3-ad52-283c568bfb44\") " pod="openstack/barbican-api-648c8d65dd-tpz2s" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.461871 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fb59ccf-cf8a-47f3-ad52-283c568bfb44-combined-ca-bundle\") pod \"barbican-api-648c8d65dd-tpz2s\" (UID: \"0fb59ccf-cf8a-47f3-ad52-283c568bfb44\") " pod="openstack/barbican-api-648c8d65dd-tpz2s" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.461910 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/82ebb804-a23f-4cb3-8f2b-2cfda6815108-ovsdbserver-sb\") pod \"dnsmasq-dns-848cf88cfc-f69kg\" (UID: \"82ebb804-a23f-4cb3-8f2b-2cfda6815108\") " pod="openstack/dnsmasq-dns-848cf88cfc-f69kg" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.461927 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/82ebb804-a23f-4cb3-8f2b-2cfda6815108-ovsdbserver-nb\") pod \"dnsmasq-dns-848cf88cfc-f69kg\" (UID: \"82ebb804-a23f-4cb3-8f2b-2cfda6815108\") " pod="openstack/dnsmasq-dns-848cf88cfc-f69kg" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.461945 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fb59ccf-cf8a-47f3-ad52-283c568bfb44-config-data\") pod \"barbican-api-648c8d65dd-tpz2s\" (UID: \"0fb59ccf-cf8a-47f3-ad52-283c568bfb44\") " pod="openstack/barbican-api-648c8d65dd-tpz2s" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.461963 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9n9tc\" (UniqueName: \"kubernetes.io/projected/82ebb804-a23f-4cb3-8f2b-2cfda6815108-kube-api-access-9n9tc\") pod \"dnsmasq-dns-848cf88cfc-f69kg\" (UID: \"82ebb804-a23f-4cb3-8f2b-2cfda6815108\") " pod="openstack/dnsmasq-dns-848cf88cfc-f69kg" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.462498 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/82ebb804-a23f-4cb3-8f2b-2cfda6815108-dns-svc\") pod \"dnsmasq-dns-848cf88cfc-f69kg\" (UID: \"82ebb804-a23f-4cb3-8f2b-2cfda6815108\") " pod="openstack/dnsmasq-dns-848cf88cfc-f69kg" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.462936 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82ebb804-a23f-4cb3-8f2b-2cfda6815108-config\") pod \"dnsmasq-dns-848cf88cfc-f69kg\" (UID: \"82ebb804-a23f-4cb3-8f2b-2cfda6815108\") " pod="openstack/dnsmasq-dns-848cf88cfc-f69kg" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.463289 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/82ebb804-a23f-4cb3-8f2b-2cfda6815108-dns-swift-storage-0\") pod \"dnsmasq-dns-848cf88cfc-f69kg\" (UID: 
\"82ebb804-a23f-4cb3-8f2b-2cfda6815108\") " pod="openstack/dnsmasq-dns-848cf88cfc-f69kg" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.463600 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/82ebb804-a23f-4cb3-8f2b-2cfda6815108-ovsdbserver-sb\") pod \"dnsmasq-dns-848cf88cfc-f69kg\" (UID: \"82ebb804-a23f-4cb3-8f2b-2cfda6815108\") " pod="openstack/dnsmasq-dns-848cf88cfc-f69kg" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.464889 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/82ebb804-a23f-4cb3-8f2b-2cfda6815108-ovsdbserver-nb\") pod \"dnsmasq-dns-848cf88cfc-f69kg\" (UID: \"82ebb804-a23f-4cb3-8f2b-2cfda6815108\") " pod="openstack/dnsmasq-dns-848cf88cfc-f69kg" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.472733 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-7dd9cf6646-xj92w" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.482378 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9n9tc\" (UniqueName: \"kubernetes.io/projected/82ebb804-a23f-4cb3-8f2b-2cfda6815108-kube-api-access-9n9tc\") pod \"dnsmasq-dns-848cf88cfc-f69kg\" (UID: \"82ebb804-a23f-4cb3-8f2b-2cfda6815108\") " pod="openstack/dnsmasq-dns-848cf88cfc-f69kg" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.563691 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0fb59ccf-cf8a-47f3-ad52-283c568bfb44-config-data-custom\") pod \"barbican-api-648c8d65dd-tpz2s\" (UID: \"0fb59ccf-cf8a-47f3-ad52-283c568bfb44\") " pod="openstack/barbican-api-648c8d65dd-tpz2s" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.563768 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fb59ccf-cf8a-47f3-ad52-283c568bfb44-combined-ca-bundle\") pod \"barbican-api-648c8d65dd-tpz2s\" (UID: \"0fb59ccf-cf8a-47f3-ad52-283c568bfb44\") " pod="openstack/barbican-api-648c8d65dd-tpz2s" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.563839 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fb59ccf-cf8a-47f3-ad52-283c568bfb44-config-data\") pod \"barbican-api-648c8d65dd-tpz2s\" (UID: \"0fb59ccf-cf8a-47f3-ad52-283c568bfb44\") " pod="openstack/barbican-api-648c8d65dd-tpz2s" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.563974 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0fb59ccf-cf8a-47f3-ad52-283c568bfb44-logs\") pod \"barbican-api-648c8d65dd-tpz2s\" (UID: \"0fb59ccf-cf8a-47f3-ad52-283c568bfb44\") " pod="openstack/barbican-api-648c8d65dd-tpz2s" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.564030 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-snlfn\" (UniqueName: \"kubernetes.io/projected/0fb59ccf-cf8a-47f3-ad52-283c568bfb44-kube-api-access-snlfn\") pod \"barbican-api-648c8d65dd-tpz2s\" (UID: \"0fb59ccf-cf8a-47f3-ad52-283c568bfb44\") " pod="openstack/barbican-api-648c8d65dd-tpz2s" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.568726 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/0fb59ccf-cf8a-47f3-ad52-283c568bfb44-logs\") pod \"barbican-api-648c8d65dd-tpz2s\" (UID: \"0fb59ccf-cf8a-47f3-ad52-283c568bfb44\") " pod="openstack/barbican-api-648c8d65dd-tpz2s" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.571280 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fb59ccf-cf8a-47f3-ad52-283c568bfb44-combined-ca-bundle\") pod \"barbican-api-648c8d65dd-tpz2s\" (UID: \"0fb59ccf-cf8a-47f3-ad52-283c568bfb44\") " pod="openstack/barbican-api-648c8d65dd-tpz2s" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.571720 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0fb59ccf-cf8a-47f3-ad52-283c568bfb44-config-data-custom\") pod \"barbican-api-648c8d65dd-tpz2s\" (UID: \"0fb59ccf-cf8a-47f3-ad52-283c568bfb44\") " pod="openstack/barbican-api-648c8d65dd-tpz2s" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.576079 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fb59ccf-cf8a-47f3-ad52-283c568bfb44-config-data\") pod \"barbican-api-648c8d65dd-tpz2s\" (UID: \"0fb59ccf-cf8a-47f3-ad52-283c568bfb44\") " pod="openstack/barbican-api-648c8d65dd-tpz2s" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.578934 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-f69kg" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.587916 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-snlfn\" (UniqueName: \"kubernetes.io/projected/0fb59ccf-cf8a-47f3-ad52-283c568bfb44-kube-api-access-snlfn\") pod \"barbican-api-648c8d65dd-tpz2s\" (UID: \"0fb59ccf-cf8a-47f3-ad52-283c568bfb44\") " pod="openstack/barbican-api-648c8d65dd-tpz2s" Dec 13 06:50:09 crc kubenswrapper[5048]: I1213 06:50:09.773578 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-648c8d65dd-tpz2s" Dec 13 06:50:10 crc kubenswrapper[5048]: I1213 06:50:10.831510 5048 generic.go:334] "Generic (PLEG): container finished" podID="8a4a2da1-cc7f-474f-baf7-16c352bd0708" containerID="740fb7bacfa2a3b3b6ad58b4b499b2ec58dc979e363111b4788f11f11437cdd0" exitCode=0 Dec 13 06:50:10 crc kubenswrapper[5048]: I1213 06:50:10.831607 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-9xjhb" event={"ID":"8a4a2da1-cc7f-474f-baf7-16c352bd0708","Type":"ContainerDied","Data":"740fb7bacfa2a3b3b6ad58b4b499b2ec58dc979e363111b4788f11f11437cdd0"} Dec 13 06:50:11 crc kubenswrapper[5048]: I1213 06:50:11.762464 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-94459c6fd-6dkbd"] Dec 13 06:50:11 crc kubenswrapper[5048]: I1213 06:50:11.764029 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-94459c6fd-6dkbd" Dec 13 06:50:11 crc kubenswrapper[5048]: I1213 06:50:11.766997 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Dec 13 06:50:11 crc kubenswrapper[5048]: I1213 06:50:11.767217 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Dec 13 06:50:11 crc kubenswrapper[5048]: I1213 06:50:11.787843 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-94459c6fd-6dkbd"] Dec 13 06:50:11 crc kubenswrapper[5048]: I1213 06:50:11.917030 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/dca6704a-bfa6-42db-9692-f6b21a2c9e08-public-tls-certs\") pod \"barbican-api-94459c6fd-6dkbd\" (UID: \"dca6704a-bfa6-42db-9692-f6b21a2c9e08\") " pod="openstack/barbican-api-94459c6fd-6dkbd" Dec 13 06:50:11 crc kubenswrapper[5048]: I1213 06:50:11.917097 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dca6704a-bfa6-42db-9692-f6b21a2c9e08-combined-ca-bundle\") pod \"barbican-api-94459c6fd-6dkbd\" (UID: \"dca6704a-bfa6-42db-9692-f6b21a2c9e08\") " pod="openstack/barbican-api-94459c6fd-6dkbd" Dec 13 06:50:11 crc kubenswrapper[5048]: I1213 06:50:11.917134 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/dca6704a-bfa6-42db-9692-f6b21a2c9e08-internal-tls-certs\") pod \"barbican-api-94459c6fd-6dkbd\" (UID: \"dca6704a-bfa6-42db-9692-f6b21a2c9e08\") " pod="openstack/barbican-api-94459c6fd-6dkbd" Dec 13 06:50:11 crc kubenswrapper[5048]: I1213 06:50:11.917168 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dca6704a-bfa6-42db-9692-f6b21a2c9e08-config-data-custom\") pod \"barbican-api-94459c6fd-6dkbd\" (UID: \"dca6704a-bfa6-42db-9692-f6b21a2c9e08\") " pod="openstack/barbican-api-94459c6fd-6dkbd" Dec 13 06:50:11 crc kubenswrapper[5048]: I1213 06:50:11.917272 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ngtds\" (UniqueName: \"kubernetes.io/projected/dca6704a-bfa6-42db-9692-f6b21a2c9e08-kube-api-access-ngtds\") pod \"barbican-api-94459c6fd-6dkbd\" (UID: \"dca6704a-bfa6-42db-9692-f6b21a2c9e08\") " pod="openstack/barbican-api-94459c6fd-6dkbd" Dec 13 06:50:11 crc kubenswrapper[5048]: I1213 06:50:11.917350 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dca6704a-bfa6-42db-9692-f6b21a2c9e08-logs\") pod \"barbican-api-94459c6fd-6dkbd\" (UID: \"dca6704a-bfa6-42db-9692-f6b21a2c9e08\") " pod="openstack/barbican-api-94459c6fd-6dkbd" Dec 13 06:50:11 crc kubenswrapper[5048]: I1213 06:50:11.917392 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dca6704a-bfa6-42db-9692-f6b21a2c9e08-config-data\") pod \"barbican-api-94459c6fd-6dkbd\" (UID: \"dca6704a-bfa6-42db-9692-f6b21a2c9e08\") " pod="openstack/barbican-api-94459c6fd-6dkbd" Dec 13 06:50:12 crc kubenswrapper[5048]: I1213 06:50:12.020376 5048 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dca6704a-bfa6-42db-9692-f6b21a2c9e08-config-data\") pod \"barbican-api-94459c6fd-6dkbd\" (UID: \"dca6704a-bfa6-42db-9692-f6b21a2c9e08\") " pod="openstack/barbican-api-94459c6fd-6dkbd" Dec 13 06:50:12 crc kubenswrapper[5048]: I1213 06:50:12.020986 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/dca6704a-bfa6-42db-9692-f6b21a2c9e08-public-tls-certs\") pod \"barbican-api-94459c6fd-6dkbd\" (UID: \"dca6704a-bfa6-42db-9692-f6b21a2c9e08\") " pod="openstack/barbican-api-94459c6fd-6dkbd" Dec 13 06:50:12 crc kubenswrapper[5048]: I1213 06:50:12.021090 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dca6704a-bfa6-42db-9692-f6b21a2c9e08-combined-ca-bundle\") pod \"barbican-api-94459c6fd-6dkbd\" (UID: \"dca6704a-bfa6-42db-9692-f6b21a2c9e08\") " pod="openstack/barbican-api-94459c6fd-6dkbd" Dec 13 06:50:12 crc kubenswrapper[5048]: I1213 06:50:12.021178 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/dca6704a-bfa6-42db-9692-f6b21a2c9e08-internal-tls-certs\") pod \"barbican-api-94459c6fd-6dkbd\" (UID: \"dca6704a-bfa6-42db-9692-f6b21a2c9e08\") " pod="openstack/barbican-api-94459c6fd-6dkbd" Dec 13 06:50:12 crc kubenswrapper[5048]: I1213 06:50:12.021281 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dca6704a-bfa6-42db-9692-f6b21a2c9e08-config-data-custom\") pod \"barbican-api-94459c6fd-6dkbd\" (UID: \"dca6704a-bfa6-42db-9692-f6b21a2c9e08\") " pod="openstack/barbican-api-94459c6fd-6dkbd" Dec 13 06:50:12 crc kubenswrapper[5048]: I1213 06:50:12.021350 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ngtds\" (UniqueName: \"kubernetes.io/projected/dca6704a-bfa6-42db-9692-f6b21a2c9e08-kube-api-access-ngtds\") pod \"barbican-api-94459c6fd-6dkbd\" (UID: \"dca6704a-bfa6-42db-9692-f6b21a2c9e08\") " pod="openstack/barbican-api-94459c6fd-6dkbd" Dec 13 06:50:12 crc kubenswrapper[5048]: I1213 06:50:12.021569 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dca6704a-bfa6-42db-9692-f6b21a2c9e08-logs\") pod \"barbican-api-94459c6fd-6dkbd\" (UID: \"dca6704a-bfa6-42db-9692-f6b21a2c9e08\") " pod="openstack/barbican-api-94459c6fd-6dkbd" Dec 13 06:50:12 crc kubenswrapper[5048]: I1213 06:50:12.022173 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dca6704a-bfa6-42db-9692-f6b21a2c9e08-logs\") pod \"barbican-api-94459c6fd-6dkbd\" (UID: \"dca6704a-bfa6-42db-9692-f6b21a2c9e08\") " pod="openstack/barbican-api-94459c6fd-6dkbd" Dec 13 06:50:12 crc kubenswrapper[5048]: I1213 06:50:12.029148 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dca6704a-bfa6-42db-9692-f6b21a2c9e08-combined-ca-bundle\") pod \"barbican-api-94459c6fd-6dkbd\" (UID: \"dca6704a-bfa6-42db-9692-f6b21a2c9e08\") " pod="openstack/barbican-api-94459c6fd-6dkbd" Dec 13 06:50:12 crc kubenswrapper[5048]: I1213 06:50:12.029220 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/dca6704a-bfa6-42db-9692-f6b21a2c9e08-public-tls-certs\") pod \"barbican-api-94459c6fd-6dkbd\" (UID: \"dca6704a-bfa6-42db-9692-f6b21a2c9e08\") " pod="openstack/barbican-api-94459c6fd-6dkbd" Dec 13 06:50:12 crc kubenswrapper[5048]: I1213 06:50:12.032282 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/dca6704a-bfa6-42db-9692-f6b21a2c9e08-internal-tls-certs\") pod \"barbican-api-94459c6fd-6dkbd\" (UID: \"dca6704a-bfa6-42db-9692-f6b21a2c9e08\") " pod="openstack/barbican-api-94459c6fd-6dkbd" Dec 13 06:50:12 crc kubenswrapper[5048]: I1213 06:50:12.036288 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dca6704a-bfa6-42db-9692-f6b21a2c9e08-config-data\") pod \"barbican-api-94459c6fd-6dkbd\" (UID: \"dca6704a-bfa6-42db-9692-f6b21a2c9e08\") " pod="openstack/barbican-api-94459c6fd-6dkbd" Dec 13 06:50:12 crc kubenswrapper[5048]: I1213 06:50:12.050058 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ngtds\" (UniqueName: \"kubernetes.io/projected/dca6704a-bfa6-42db-9692-f6b21a2c9e08-kube-api-access-ngtds\") pod \"barbican-api-94459c6fd-6dkbd\" (UID: \"dca6704a-bfa6-42db-9692-f6b21a2c9e08\") " pod="openstack/barbican-api-94459c6fd-6dkbd" Dec 13 06:50:12 crc kubenswrapper[5048]: I1213 06:50:12.064099 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dca6704a-bfa6-42db-9692-f6b21a2c9e08-config-data-custom\") pod \"barbican-api-94459c6fd-6dkbd\" (UID: \"dca6704a-bfa6-42db-9692-f6b21a2c9e08\") " pod="openstack/barbican-api-94459c6fd-6dkbd" Dec 13 06:50:12 crc kubenswrapper[5048]: I1213 06:50:12.087845 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-94459c6fd-6dkbd" Dec 13 06:50:12 crc kubenswrapper[5048]: I1213 06:50:12.750835 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-9xjhb" Dec 13 06:50:12 crc kubenswrapper[5048]: I1213 06:50:12.860990 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-9xjhb" event={"ID":"8a4a2da1-cc7f-474f-baf7-16c352bd0708","Type":"ContainerDied","Data":"65610ced9a4b11ea57219dcbd3829663408aa05a64cf2a5a11c52f33f2321e15"} Dec 13 06:50:12 crc kubenswrapper[5048]: I1213 06:50:12.861035 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-9xjhb" Dec 13 06:50:12 crc kubenswrapper[5048]: I1213 06:50:12.861038 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="65610ced9a4b11ea57219dcbd3829663408aa05a64cf2a5a11c52f33f2321e15" Dec 13 06:50:12 crc kubenswrapper[5048]: I1213 06:50:12.938536 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8a4a2da1-cc7f-474f-baf7-16c352bd0708-etc-machine-id\") pod \"8a4a2da1-cc7f-474f-baf7-16c352bd0708\" (UID: \"8a4a2da1-cc7f-474f-baf7-16c352bd0708\") " Dec 13 06:50:12 crc kubenswrapper[5048]: I1213 06:50:12.938947 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a4a2da1-cc7f-474f-baf7-16c352bd0708-config-data\") pod \"8a4a2da1-cc7f-474f-baf7-16c352bd0708\" (UID: \"8a4a2da1-cc7f-474f-baf7-16c352bd0708\") " Dec 13 06:50:12 crc kubenswrapper[5048]: I1213 06:50:12.939024 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/8a4a2da1-cc7f-474f-baf7-16c352bd0708-db-sync-config-data\") pod \"8a4a2da1-cc7f-474f-baf7-16c352bd0708\" (UID: \"8a4a2da1-cc7f-474f-baf7-16c352bd0708\") " Dec 13 06:50:12 crc kubenswrapper[5048]: I1213 06:50:12.939032 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8a4a2da1-cc7f-474f-baf7-16c352bd0708-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "8a4a2da1-cc7f-474f-baf7-16c352bd0708" (UID: "8a4a2da1-cc7f-474f-baf7-16c352bd0708"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 13 06:50:12 crc kubenswrapper[5048]: I1213 06:50:12.939111 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n59rd\" (UniqueName: \"kubernetes.io/projected/8a4a2da1-cc7f-474f-baf7-16c352bd0708-kube-api-access-n59rd\") pod \"8a4a2da1-cc7f-474f-baf7-16c352bd0708\" (UID: \"8a4a2da1-cc7f-474f-baf7-16c352bd0708\") " Dec 13 06:50:12 crc kubenswrapper[5048]: I1213 06:50:12.939149 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a4a2da1-cc7f-474f-baf7-16c352bd0708-scripts\") pod \"8a4a2da1-cc7f-474f-baf7-16c352bd0708\" (UID: \"8a4a2da1-cc7f-474f-baf7-16c352bd0708\") " Dec 13 06:50:12 crc kubenswrapper[5048]: I1213 06:50:12.939191 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a4a2da1-cc7f-474f-baf7-16c352bd0708-combined-ca-bundle\") pod \"8a4a2da1-cc7f-474f-baf7-16c352bd0708\" (UID: \"8a4a2da1-cc7f-474f-baf7-16c352bd0708\") " Dec 13 06:50:12 crc kubenswrapper[5048]: I1213 06:50:12.939572 5048 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8a4a2da1-cc7f-474f-baf7-16c352bd0708-etc-machine-id\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:12 crc kubenswrapper[5048]: I1213 06:50:12.944711 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a4a2da1-cc7f-474f-baf7-16c352bd0708-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "8a4a2da1-cc7f-474f-baf7-16c352bd0708" (UID: "8a4a2da1-cc7f-474f-baf7-16c352bd0708"). InnerVolumeSpecName "db-sync-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:50:12 crc kubenswrapper[5048]: I1213 06:50:12.946971 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a4a2da1-cc7f-474f-baf7-16c352bd0708-kube-api-access-n59rd" (OuterVolumeSpecName: "kube-api-access-n59rd") pod "8a4a2da1-cc7f-474f-baf7-16c352bd0708" (UID: "8a4a2da1-cc7f-474f-baf7-16c352bd0708"). InnerVolumeSpecName "kube-api-access-n59rd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:50:12 crc kubenswrapper[5048]: I1213 06:50:12.949540 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a4a2da1-cc7f-474f-baf7-16c352bd0708-scripts" (OuterVolumeSpecName: "scripts") pod "8a4a2da1-cc7f-474f-baf7-16c352bd0708" (UID: "8a4a2da1-cc7f-474f-baf7-16c352bd0708"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:50:12 crc kubenswrapper[5048]: I1213 06:50:12.966161 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a4a2da1-cc7f-474f-baf7-16c352bd0708-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8a4a2da1-cc7f-474f-baf7-16c352bd0708" (UID: "8a4a2da1-cc7f-474f-baf7-16c352bd0708"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.006228 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a4a2da1-cc7f-474f-baf7-16c352bd0708-config-data" (OuterVolumeSpecName: "config-data") pod "8a4a2da1-cc7f-474f-baf7-16c352bd0708" (UID: "8a4a2da1-cc7f-474f-baf7-16c352bd0708"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.044135 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a4a2da1-cc7f-474f-baf7-16c352bd0708-config-data\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.044172 5048 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/8a4a2da1-cc7f-474f-baf7-16c352bd0708-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.044184 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n59rd\" (UniqueName: \"kubernetes.io/projected/8a4a2da1-cc7f-474f-baf7-16c352bd0708-kube-api-access-n59rd\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.044197 5048 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a4a2da1-cc7f-474f-baf7-16c352bd0708-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.044207 5048 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a4a2da1-cc7f-474f-baf7-16c352bd0708-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.070530 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Dec 13 06:50:13 crc kubenswrapper[5048]: E1213 06:50:13.071216 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a4a2da1-cc7f-474f-baf7-16c352bd0708" containerName="cinder-db-sync" Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 
06:50:13.071236 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a4a2da1-cc7f-474f-baf7-16c352bd0708" containerName="cinder-db-sync"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.071478 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a4a2da1-cc7f-474f-baf7-16c352bd0708" containerName="cinder-db-sync"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.072536 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.079143 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.085885 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.151217 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-f69kg"]
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.231411 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-qhcq5"]
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.237663 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-qhcq5"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.251377 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p484c\" (UniqueName: \"kubernetes.io/projected/b5edcb36-9ff8-4be5-9891-75151b570837-kube-api-access-p484c\") pod \"cinder-scheduler-0\" (UID: \"b5edcb36-9ff8-4be5-9891-75151b570837\") " pod="openstack/cinder-scheduler-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.251679 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b5edcb36-9ff8-4be5-9891-75151b570837-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"b5edcb36-9ff8-4be5-9891-75151b570837\") " pod="openstack/cinder-scheduler-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.251802 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b5edcb36-9ff8-4be5-9891-75151b570837-scripts\") pod \"cinder-scheduler-0\" (UID: \"b5edcb36-9ff8-4be5-9891-75151b570837\") " pod="openstack/cinder-scheduler-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.251896 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5edcb36-9ff8-4be5-9891-75151b570837-config-data\") pod \"cinder-scheduler-0\" (UID: \"b5edcb36-9ff8-4be5-9891-75151b570837\") " pod="openstack/cinder-scheduler-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.251980 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b5edcb36-9ff8-4be5-9891-75151b570837-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"b5edcb36-9ff8-4be5-9891-75151b570837\") " pod="openstack/cinder-scheduler-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.252082 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5edcb36-9ff8-4be5-9891-75151b570837-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"b5edcb36-9ff8-4be5-9891-75151b570837\") " pod="openstack/cinder-scheduler-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.279040 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-qhcq5"]
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.351746 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"]
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.353734 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.354973 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8sx6f\" (UniqueName: \"kubernetes.io/projected/a2040734-c367-448b-a7ce-762b168b35c0-kube-api-access-8sx6f\") pod \"dnsmasq-dns-6578955fd5-qhcq5\" (UID: \"a2040734-c367-448b-a7ce-762b168b35c0\") " pod="openstack/dnsmasq-dns-6578955fd5-qhcq5"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.355051 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p484c\" (UniqueName: \"kubernetes.io/projected/b5edcb36-9ff8-4be5-9891-75151b570837-kube-api-access-p484c\") pod \"cinder-scheduler-0\" (UID: \"b5edcb36-9ff8-4be5-9891-75151b570837\") " pod="openstack/cinder-scheduler-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.355109 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a2040734-c367-448b-a7ce-762b168b35c0-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-qhcq5\" (UID: \"a2040734-c367-448b-a7ce-762b168b35c0\") " pod="openstack/dnsmasq-dns-6578955fd5-qhcq5"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.355165 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b5edcb36-9ff8-4be5-9891-75151b570837-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"b5edcb36-9ff8-4be5-9891-75151b570837\") " pod="openstack/cinder-scheduler-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.355326 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a2040734-c367-448b-a7ce-762b168b35c0-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-qhcq5\" (UID: \"a2040734-c367-448b-a7ce-762b168b35c0\") " pod="openstack/dnsmasq-dns-6578955fd5-qhcq5"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.355377 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b5edcb36-9ff8-4be5-9891-75151b570837-scripts\") pod \"cinder-scheduler-0\" (UID: \"b5edcb36-9ff8-4be5-9891-75151b570837\") " pod="openstack/cinder-scheduler-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.355408 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5edcb36-9ff8-4be5-9891-75151b570837-config-data\") pod \"cinder-scheduler-0\" (UID: \"b5edcb36-9ff8-4be5-9891-75151b570837\") " pod="openstack/cinder-scheduler-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.355429 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b5edcb36-9ff8-4be5-9891-75151b570837-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"b5edcb36-9ff8-4be5-9891-75151b570837\") " pod="openstack/cinder-scheduler-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.355512 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5edcb36-9ff8-4be5-9891-75151b570837-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"b5edcb36-9ff8-4be5-9891-75151b570837\") " pod="openstack/cinder-scheduler-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.355540 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a2040734-c367-448b-a7ce-762b168b35c0-dns-svc\") pod \"dnsmasq-dns-6578955fd5-qhcq5\" (UID: \"a2040734-c367-448b-a7ce-762b168b35c0\") " pod="openstack/dnsmasq-dns-6578955fd5-qhcq5"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.355617 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b5edcb36-9ff8-4be5-9891-75151b570837-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"b5edcb36-9ff8-4be5-9891-75151b570837\") " pod="openstack/cinder-scheduler-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.355658 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a2040734-c367-448b-a7ce-762b168b35c0-ovsdbserver-sb\") pod \"dnsmasq-dns-6578955fd5-qhcq5\" (UID: \"a2040734-c367-448b-a7ce-762b168b35c0\") " pod="openstack/dnsmasq-dns-6578955fd5-qhcq5"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.357990 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.360049 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b5edcb36-9ff8-4be5-9891-75151b570837-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"b5edcb36-9ff8-4be5-9891-75151b570837\") " pod="openstack/cinder-scheduler-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.362686 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2040734-c367-448b-a7ce-762b168b35c0-config\") pod \"dnsmasq-dns-6578955fd5-qhcq5\" (UID: \"a2040734-c367-448b-a7ce-762b168b35c0\") " pod="openstack/dnsmasq-dns-6578955fd5-qhcq5"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.363385 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5edcb36-9ff8-4be5-9891-75151b570837-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"b5edcb36-9ff8-4be5-9891-75151b570837\") " pod="openstack/cinder-scheduler-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.364229 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b5edcb36-9ff8-4be5-9891-75151b570837-scripts\") pod \"cinder-scheduler-0\" (UID: \"b5edcb36-9ff8-4be5-9891-75151b570837\") " pod="openstack/cinder-scheduler-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.370014 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.424064 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5edcb36-9ff8-4be5-9891-75151b570837-config-data\") pod \"cinder-scheduler-0\" (UID: \"b5edcb36-9ff8-4be5-9891-75151b570837\") " pod="openstack/cinder-scheduler-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.434695 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p484c\" (UniqueName: \"kubernetes.io/projected/b5edcb36-9ff8-4be5-9891-75151b570837-kube-api-access-p484c\") pod \"cinder-scheduler-0\" (UID: \"b5edcb36-9ff8-4be5-9891-75151b570837\") " pod="openstack/cinder-scheduler-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.465318 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2vw66\" (UniqueName: \"kubernetes.io/projected/a670fb50-d533-4a39-a9b5-3b350ab332ed-kube-api-access-2vw66\") pod \"cinder-api-0\" (UID: \"a670fb50-d533-4a39-a9b5-3b350ab332ed\") " pod="openstack/cinder-api-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.465574 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a2040734-c367-448b-a7ce-762b168b35c0-dns-svc\") pod \"dnsmasq-dns-6578955fd5-qhcq5\" (UID: \"a2040734-c367-448b-a7ce-762b168b35c0\") " pod="openstack/dnsmasq-dns-6578955fd5-qhcq5"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.465674 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a670fb50-d533-4a39-a9b5-3b350ab332ed-config-data\") pod \"cinder-api-0\" (UID: \"a670fb50-d533-4a39-a9b5-3b350ab332ed\") " pod="openstack/cinder-api-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.465737 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a2040734-c367-448b-a7ce-762b168b35c0-ovsdbserver-sb\") pod \"dnsmasq-dns-6578955fd5-qhcq5\" (UID: \"a2040734-c367-448b-a7ce-762b168b35c0\") " pod="openstack/dnsmasq-dns-6578955fd5-qhcq5"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.465817 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a670fb50-d533-4a39-a9b5-3b350ab332ed-config-data-custom\") pod \"cinder-api-0\" (UID: \"a670fb50-d533-4a39-a9b5-3b350ab332ed\") " pod="openstack/cinder-api-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.465907 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2040734-c367-448b-a7ce-762b168b35c0-config\") pod \"dnsmasq-dns-6578955fd5-qhcq5\" (UID: \"a2040734-c367-448b-a7ce-762b168b35c0\") " pod="openstack/dnsmasq-dns-6578955fd5-qhcq5"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.465979 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8sx6f\" (UniqueName: \"kubernetes.io/projected/a2040734-c367-448b-a7ce-762b168b35c0-kube-api-access-8sx6f\") pod \"dnsmasq-dns-6578955fd5-qhcq5\" (UID: \"a2040734-c367-448b-a7ce-762b168b35c0\") " pod="openstack/dnsmasq-dns-6578955fd5-qhcq5"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.466040 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a670fb50-d533-4a39-a9b5-3b350ab332ed-scripts\") pod \"cinder-api-0\" (UID: \"a670fb50-d533-4a39-a9b5-3b350ab332ed\") " pod="openstack/cinder-api-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.466102 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a670fb50-d533-4a39-a9b5-3b350ab332ed-etc-machine-id\") pod \"cinder-api-0\" (UID: \"a670fb50-d533-4a39-a9b5-3b350ab332ed\") " pod="openstack/cinder-api-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.466209 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a2040734-c367-448b-a7ce-762b168b35c0-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-qhcq5\" (UID: \"a2040734-c367-448b-a7ce-762b168b35c0\") " pod="openstack/dnsmasq-dns-6578955fd5-qhcq5"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.466348 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a670fb50-d533-4a39-a9b5-3b350ab332ed-logs\") pod \"cinder-api-0\" (UID: \"a670fb50-d533-4a39-a9b5-3b350ab332ed\") " pod="openstack/cinder-api-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.466452 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a670fb50-d533-4a39-a9b5-3b350ab332ed-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"a670fb50-d533-4a39-a9b5-3b350ab332ed\") " pod="openstack/cinder-api-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.466539 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a2040734-c367-448b-a7ce-762b168b35c0-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-qhcq5\" (UID: \"a2040734-c367-448b-a7ce-762b168b35c0\") " pod="openstack/dnsmasq-dns-6578955fd5-qhcq5"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.466850 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2040734-c367-448b-a7ce-762b168b35c0-config\") pod \"dnsmasq-dns-6578955fd5-qhcq5\" (UID: \"a2040734-c367-448b-a7ce-762b168b35c0\") " pod="openstack/dnsmasq-dns-6578955fd5-qhcq5"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.466860 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a2040734-c367-448b-a7ce-762b168b35c0-dns-svc\") pod \"dnsmasq-dns-6578955fd5-qhcq5\" (UID: \"a2040734-c367-448b-a7ce-762b168b35c0\") " pod="openstack/dnsmasq-dns-6578955fd5-qhcq5"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.469592 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a2040734-c367-448b-a7ce-762b168b35c0-ovsdbserver-sb\") pod \"dnsmasq-dns-6578955fd5-qhcq5\" (UID: \"a2040734-c367-448b-a7ce-762b168b35c0\") " pod="openstack/dnsmasq-dns-6578955fd5-qhcq5"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.470021 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a2040734-c367-448b-a7ce-762b168b35c0-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-qhcq5\" (UID: \"a2040734-c367-448b-a7ce-762b168b35c0\") " pod="openstack/dnsmasq-dns-6578955fd5-qhcq5"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.470308 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a2040734-c367-448b-a7ce-762b168b35c0-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-qhcq5\" (UID: \"a2040734-c367-448b-a7ce-762b168b35c0\") " pod="openstack/dnsmasq-dns-6578955fd5-qhcq5"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.486641 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8sx6f\" (UniqueName: \"kubernetes.io/projected/a2040734-c367-448b-a7ce-762b168b35c0-kube-api-access-8sx6f\") pod \"dnsmasq-dns-6578955fd5-qhcq5\" (UID: \"a2040734-c367-448b-a7ce-762b168b35c0\") " pod="openstack/dnsmasq-dns-6578955fd5-qhcq5"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.568413 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a670fb50-d533-4a39-a9b5-3b350ab332ed-logs\") pod \"cinder-api-0\" (UID: \"a670fb50-d533-4a39-a9b5-3b350ab332ed\") " pod="openstack/cinder-api-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.568733 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a670fb50-d533-4a39-a9b5-3b350ab332ed-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"a670fb50-d533-4a39-a9b5-3b350ab332ed\") " pod="openstack/cinder-api-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.568813 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2vw66\" (UniqueName: \"kubernetes.io/projected/a670fb50-d533-4a39-a9b5-3b350ab332ed-kube-api-access-2vw66\") pod \"cinder-api-0\" (UID: \"a670fb50-d533-4a39-a9b5-3b350ab332ed\") " pod="openstack/cinder-api-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.568853 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a670fb50-d533-4a39-a9b5-3b350ab332ed-config-data\") pod \"cinder-api-0\" (UID: \"a670fb50-d533-4a39-a9b5-3b350ab332ed\") " pod="openstack/cinder-api-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.568877 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a670fb50-d533-4a39-a9b5-3b350ab332ed-config-data-custom\") pod \"cinder-api-0\" (UID: \"a670fb50-d533-4a39-a9b5-3b350ab332ed\") " pod="openstack/cinder-api-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.568890 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a670fb50-d533-4a39-a9b5-3b350ab332ed-logs\") pod \"cinder-api-0\" (UID: \"a670fb50-d533-4a39-a9b5-3b350ab332ed\") " pod="openstack/cinder-api-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.568922 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a670fb50-d533-4a39-a9b5-3b350ab332ed-scripts\") pod \"cinder-api-0\" (UID: \"a670fb50-d533-4a39-a9b5-3b350ab332ed\") " pod="openstack/cinder-api-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.568941 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a670fb50-d533-4a39-a9b5-3b350ab332ed-etc-machine-id\") pod \"cinder-api-0\" (UID: \"a670fb50-d533-4a39-a9b5-3b350ab332ed\") " pod="openstack/cinder-api-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.569056 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a670fb50-d533-4a39-a9b5-3b350ab332ed-etc-machine-id\") pod \"cinder-api-0\" (UID: \"a670fb50-d533-4a39-a9b5-3b350ab332ed\") " pod="openstack/cinder-api-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.573311 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a670fb50-d533-4a39-a9b5-3b350ab332ed-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"a670fb50-d533-4a39-a9b5-3b350ab332ed\") " pod="openstack/cinder-api-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.573732 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a670fb50-d533-4a39-a9b5-3b350ab332ed-config-data\") pod \"cinder-api-0\" (UID: \"a670fb50-d533-4a39-a9b5-3b350ab332ed\") " pod="openstack/cinder-api-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.575543 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a670fb50-d533-4a39-a9b5-3b350ab332ed-config-data-custom\") pod \"cinder-api-0\" (UID: \"a670fb50-d533-4a39-a9b5-3b350ab332ed\") " pod="openstack/cinder-api-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.575891 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a670fb50-d533-4a39-a9b5-3b350ab332ed-scripts\") pod \"cinder-api-0\" (UID: \"a670fb50-d533-4a39-a9b5-3b350ab332ed\") " pod="openstack/cinder-api-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.579064 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-qhcq5"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.585649 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2vw66\" (UniqueName: \"kubernetes.io/projected/a670fb50-d533-4a39-a9b5-3b350ab332ed-kube-api-access-2vw66\") pod \"cinder-api-0\" (UID: \"a670fb50-d533-4a39-a9b5-3b350ab332ed\") " pod="openstack/cinder-api-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.625609 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Dec 13 06:50:13 crc kubenswrapper[5048]: I1213 06:50:13.721555 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Dec 13 06:50:14 crc kubenswrapper[5048]: I1213 06:50:14.887348 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-7888dc9665-xrmff"]
Dec 13 06:50:15 crc kubenswrapper[5048]: W1213 06:50:15.249125 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podec726365_6964_4e11_942d_d57482573f01.slice/crio-a768e7e0fe195e14fd92681109164e5de6471d023e29582fbbc9ae5f15e2da03 WatchSource:0}: Error finding container a768e7e0fe195e14fd92681109164e5de6471d023e29582fbbc9ae5f15e2da03: Status 404 returned error can't find the container with id a768e7e0fe195e14fd92681109164e5de6471d023e29582fbbc9ae5f15e2da03
Dec 13 06:50:15 crc kubenswrapper[5048]: I1213 06:50:15.661808 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"]
Dec 13 06:50:15 crc kubenswrapper[5048]: I1213 06:50:15.815139 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-7dd9cf6646-xj92w"]
Dec 13 06:50:15 crc kubenswrapper[5048]: I1213 06:50:15.917758 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2a87c44c-d2c7-4adf-bdbb-fae9a79b955b" containerName="ceilometer-central-agent" containerID="cri-o://03d5d3b195f7ca1dd1901cf64b9fb2ba462e881422823475e43e1e4bb15b4f76" gracePeriod=30
Dec 13 06:50:15 crc kubenswrapper[5048]: I1213 06:50:15.918026 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2a87c44c-d2c7-4adf-bdbb-fae9a79b955b" containerName="proxy-httpd" containerID="cri-o://dcaa41ecef403c2cc5003430aa31dcacd971aacf80d5fca4cabc0e4a0c57b682" gracePeriod=30
Dec 13 06:50:15 crc kubenswrapper[5048]: I1213 06:50:15.918182 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2a87c44c-d2c7-4adf-bdbb-fae9a79b955b" containerName="sg-core" containerID="cri-o://a018e2fc6d16a78d973ff5dbf2bbd7e1be2eb4ee7c9c05b0c8b477d00a261cff" gracePeriod=30
Dec 13 06:50:15 crc kubenswrapper[5048]: I1213 06:50:15.918201 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2a87c44c-d2c7-4adf-bdbb-fae9a79b955b" containerName="ceilometer-notification-agent" containerID="cri-o://69d56a5567066958fafe92b1b8664aff26f228b91e5c1972493bb8b42cce91b9" gracePeriod=30
Dec 13 06:50:15 crc kubenswrapper[5048]: I1213 06:50:15.918452 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b","Type":"ContainerStarted","Data":"dcaa41ecef403c2cc5003430aa31dcacd971aacf80d5fca4cabc0e4a0c57b682"}
Dec 13 06:50:15 crc kubenswrapper[5048]: I1213 06:50:15.918502 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Dec 13 06:50:15 crc kubenswrapper[5048]: I1213 06:50:15.973096 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-f69kg"]
Dec 13 06:50:15 crc kubenswrapper[5048]: I1213 06:50:15.974661 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7dd9cf6646-xj92w" event={"ID":"ff1df1f6-e2a6-4e70-b352-f3b15b9255d7","Type":"ContainerStarted","Data":"20bd5923c00dbf4695568f15f07b2f7bf4d2788652d754479033bb6d3be328e2"}
Dec 13 06:50:16 crc kubenswrapper[5048]: I1213 06:50:15.998622 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.059570182 podStartE2EDuration="1m1.998600156s" podCreationTimestamp="2025-12-13 06:49:14 +0000 UTC" firstStartedPulling="2025-12-13 06:49:16.448424543 +0000 UTC m=+1190.315019124" lastFinishedPulling="2025-12-13 06:50:15.387454517 +0000 UTC m=+1249.254049098" observedRunningTime="2025-12-13 06:50:15.946618636 +0000 UTC m=+1249.813213217" watchObservedRunningTime="2025-12-13 06:50:15.998600156 +0000 UTC m=+1249.865194737"
Dec 13 06:50:16 crc kubenswrapper[5048]: I1213 06:50:15.999858 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7888dc9665-xrmff" event={"ID":"ec726365-6964-4e11-942d-d57482573f01","Type":"ContainerStarted","Data":"a768e7e0fe195e14fd92681109164e5de6471d023e29582fbbc9ae5f15e2da03"}
Dec 13 06:50:16 crc kubenswrapper[5048]: I1213 06:50:16.012703 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-648c8d65dd-tpz2s"]
Dec 13 06:50:16 crc kubenswrapper[5048]: I1213 06:50:16.069944 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Dec 13 06:50:16 crc kubenswrapper[5048]: I1213 06:50:16.110202 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"]
Dec 13 06:50:16 crc kubenswrapper[5048]: I1213 06:50:16.257449 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-94459c6fd-6dkbd"]
Dec 13 06:50:16 crc kubenswrapper[5048]: W1213 06:50:16.290285 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddca6704a_bfa6_42db_9692_f6b21a2c9e08.slice/crio-deb9335d24ae385011d4a438799d4cd335c7ec7f26b7d44e34011797aa91d81f WatchSource:0}: Error finding container deb9335d24ae385011d4a438799d4cd335c7ec7f26b7d44e34011797aa91d81f: Status 404 returned error can't find the container with id deb9335d24ae385011d4a438799d4cd335c7ec7f26b7d44e34011797aa91d81f
Dec 13 06:50:16 crc kubenswrapper[5048]: I1213 06:50:16.372496 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-qhcq5"]
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.009651 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-qhcq5" event={"ID":"a2040734-c367-448b-a7ce-762b168b35c0","Type":"ContainerStarted","Data":"fef5f6e239832aac522e81afb92fe5e16fbb5fd20a1c0569000040fcc92cf29f"}
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.011081 5048 generic.go:334] "Generic (PLEG): container finished" podID="82ebb804-a23f-4cb3-8f2b-2cfda6815108" containerID="3dc9c412bd01f6da2a7b3608c820e498c79610cd5a14974425bef813c68ee362" exitCode=0
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.011136 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-f69kg" event={"ID":"82ebb804-a23f-4cb3-8f2b-2cfda6815108","Type":"ContainerDied","Data":"3dc9c412bd01f6da2a7b3608c820e498c79610cd5a14974425bef813c68ee362"}
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.011156 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-f69kg" event={"ID":"82ebb804-a23f-4cb3-8f2b-2cfda6815108","Type":"ContainerStarted","Data":"84bce9e4e0067928fff30784b128bcdc516b0bffdfdeca10fb436d146af261fc"}
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.016887 5048 generic.go:334] "Generic (PLEG): container finished" podID="2a87c44c-d2c7-4adf-bdbb-fae9a79b955b" containerID="a018e2fc6d16a78d973ff5dbf2bbd7e1be2eb4ee7c9c05b0c8b477d00a261cff" exitCode=2
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.016916 5048 generic.go:334] "Generic (PLEG): container finished" podID="2a87c44c-d2c7-4adf-bdbb-fae9a79b955b" containerID="03d5d3b195f7ca1dd1901cf64b9fb2ba462e881422823475e43e1e4bb15b4f76" exitCode=0
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.016923 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b","Type":"ContainerDied","Data":"a018e2fc6d16a78d973ff5dbf2bbd7e1be2eb4ee7c9c05b0c8b477d00a261cff"}
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.016957 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b","Type":"ContainerDied","Data":"03d5d3b195f7ca1dd1901cf64b9fb2ba462e881422823475e43e1e4bb15b4f76"}
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.018289 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b5edcb36-9ff8-4be5-9891-75151b570837","Type":"ContainerStarted","Data":"51f41a66093ad244aa9b0e28e7261be5337173ed365d1eacb8c75902947cf6c1"}
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.020029 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-94459c6fd-6dkbd" event={"ID":"dca6704a-bfa6-42db-9692-f6b21a2c9e08","Type":"ContainerStarted","Data":"4660867b9e5b1b336fb61a771ffe70a4b6ed9d1d4e5369c0e5b55b7d71a97c8c"}
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.020054 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-94459c6fd-6dkbd" event={"ID":"dca6704a-bfa6-42db-9692-f6b21a2c9e08","Type":"ContainerStarted","Data":"deb9335d24ae385011d4a438799d4cd335c7ec7f26b7d44e34011797aa91d81f"}
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.033003 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a670fb50-d533-4a39-a9b5-3b350ab332ed","Type":"ContainerStarted","Data":"ad786c4407a27862de7df7c25224a7d8fbb812c7c04d79a22fd92e89f15eca8f"}
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.033041 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a670fb50-d533-4a39-a9b5-3b350ab332ed","Type":"ContainerStarted","Data":"90348e84d75c554a94f9a4da23c7ac99999db6a91969dd33ed7761dd2d247ff8"}
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.036221 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-648c8d65dd-tpz2s" event={"ID":"0fb59ccf-cf8a-47f3-ad52-283c568bfb44","Type":"ContainerStarted","Data":"a600dd9724e454bf8a954ff8cf408d3f05833bb1b1e2f23754f8dc6a2632dde5"}
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.036258 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-648c8d65dd-tpz2s" event={"ID":"0fb59ccf-cf8a-47f3-ad52-283c568bfb44","Type":"ContainerStarted","Data":"1899709674bb106a0c0bd4e1d5e157617b213223d0bacf552e443a8af15566fe"}
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.036274 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-648c8d65dd-tpz2s" event={"ID":"0fb59ccf-cf8a-47f3-ad52-283c568bfb44","Type":"ContainerStarted","Data":"889cc4a180cd96338ff5a016fa96b80dff496fa3d6fe7fcdbba8edf5ca1de8b8"}
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.036321 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-648c8d65dd-tpz2s"
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.036349 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-648c8d65dd-tpz2s"
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.065133 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-648c8d65dd-tpz2s" podStartSLOduration=8.065115718 podStartE2EDuration="8.065115718s" podCreationTimestamp="2025-12-13 06:50:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:50:17.062041613 +0000 UTC m=+1250.928636214" watchObservedRunningTime="2025-12-13 06:50:17.065115718 +0000 UTC m=+1250.931710299"
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.530290 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-f69kg"
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.668138 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/82ebb804-a23f-4cb3-8f2b-2cfda6815108-dns-svc\") pod \"82ebb804-a23f-4cb3-8f2b-2cfda6815108\" (UID: \"82ebb804-a23f-4cb3-8f2b-2cfda6815108\") "
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.668239 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/82ebb804-a23f-4cb3-8f2b-2cfda6815108-ovsdbserver-nb\") pod \"82ebb804-a23f-4cb3-8f2b-2cfda6815108\" (UID: \"82ebb804-a23f-4cb3-8f2b-2cfda6815108\") "
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.668283 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82ebb804-a23f-4cb3-8f2b-2cfda6815108-config\") pod \"82ebb804-a23f-4cb3-8f2b-2cfda6815108\" (UID: \"82ebb804-a23f-4cb3-8f2b-2cfda6815108\") "
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.668362 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9n9tc\" (UniqueName: \"kubernetes.io/projected/82ebb804-a23f-4cb3-8f2b-2cfda6815108-kube-api-access-9n9tc\") pod \"82ebb804-a23f-4cb3-8f2b-2cfda6815108\" (UID: \"82ebb804-a23f-4cb3-8f2b-2cfda6815108\") "
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.668400 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/82ebb804-a23f-4cb3-8f2b-2cfda6815108-ovsdbserver-sb\") pod \"82ebb804-a23f-4cb3-8f2b-2cfda6815108\" (UID: \"82ebb804-a23f-4cb3-8f2b-2cfda6815108\") "
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.668521 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/82ebb804-a23f-4cb3-8f2b-2cfda6815108-dns-swift-storage-0\") pod \"82ebb804-a23f-4cb3-8f2b-2cfda6815108\" (UID: \"82ebb804-a23f-4cb3-8f2b-2cfda6815108\") "
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.677664 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82ebb804-a23f-4cb3-8f2b-2cfda6815108-kube-api-access-9n9tc" (OuterVolumeSpecName: "kube-api-access-9n9tc") pod "82ebb804-a23f-4cb3-8f2b-2cfda6815108" (UID: "82ebb804-a23f-4cb3-8f2b-2cfda6815108"). InnerVolumeSpecName "kube-api-access-9n9tc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.702541 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82ebb804-a23f-4cb3-8f2b-2cfda6815108-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "82ebb804-a23f-4cb3-8f2b-2cfda6815108" (UID: "82ebb804-a23f-4cb3-8f2b-2cfda6815108"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.713671 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82ebb804-a23f-4cb3-8f2b-2cfda6815108-config" (OuterVolumeSpecName: "config") pod "82ebb804-a23f-4cb3-8f2b-2cfda6815108" (UID: "82ebb804-a23f-4cb3-8f2b-2cfda6815108"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.719899 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82ebb804-a23f-4cb3-8f2b-2cfda6815108-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "82ebb804-a23f-4cb3-8f2b-2cfda6815108" (UID: "82ebb804-a23f-4cb3-8f2b-2cfda6815108"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.720172 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82ebb804-a23f-4cb3-8f2b-2cfda6815108-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "82ebb804-a23f-4cb3-8f2b-2cfda6815108" (UID: "82ebb804-a23f-4cb3-8f2b-2cfda6815108"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.724080 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82ebb804-a23f-4cb3-8f2b-2cfda6815108-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "82ebb804-a23f-4cb3-8f2b-2cfda6815108" (UID: "82ebb804-a23f-4cb3-8f2b-2cfda6815108"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.770850 5048 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/82ebb804-a23f-4cb3-8f2b-2cfda6815108-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.770887 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82ebb804-a23f-4cb3-8f2b-2cfda6815108-config\") on node \"crc\" DevicePath \"\""
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.770897 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9n9tc\" (UniqueName: \"kubernetes.io/projected/82ebb804-a23f-4cb3-8f2b-2cfda6815108-kube-api-access-9n9tc\") on node \"crc\" DevicePath \"\""
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.770907 5048 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/82ebb804-a23f-4cb3-8f2b-2cfda6815108-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.770915 5048 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/82ebb804-a23f-4cb3-8f2b-2cfda6815108-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Dec 13 06:50:17 crc kubenswrapper[5048]: I1213 06:50:17.770924 5048 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/82ebb804-a23f-4cb3-8f2b-2cfda6815108-dns-svc\") on node \"crc\" DevicePath \"\""
Dec 13 06:50:18 crc kubenswrapper[5048]: I1213 06:50:18.065250 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a670fb50-d533-4a39-a9b5-3b350ab332ed","Type":"ContainerStarted","Data":"c8b8a7ddb2f5ed20ad5d7a6e5b9436f965575d52fe90cd6267a8f8bce880234a"}
Dec 13 06:50:18 crc kubenswrapper[5048]: I1213 06:50:18.065684 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="a670fb50-d533-4a39-a9b5-3b350ab332ed" containerName="cinder-api-log" containerID="cri-o://ad786c4407a27862de7df7c25224a7d8fbb812c7c04d79a22fd92e89f15eca8f" gracePeriod=30
Dec 13 06:50:18 crc kubenswrapper[5048]: I1213 06:50:18.065946 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0"
Dec 13 06:50:18 crc kubenswrapper[5048]: I1213 06:50:18.066213 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="a670fb50-d533-4a39-a9b5-3b350ab332ed" containerName="cinder-api" containerID="cri-o://c8b8a7ddb2f5ed20ad5d7a6e5b9436f965575d52fe90cd6267a8f8bce880234a" gracePeriod=30
Dec 13 06:50:18 crc kubenswrapper[5048]: I1213 06:50:18.069057 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7888dc9665-xrmff" event={"ID":"ec726365-6964-4e11-942d-d57482573f01","Type":"ContainerStarted","Data":"362f6a42a4c8d05afaaad6731a9410d0a67a84387a59a3e376d3d9c277b53319"}
Dec 13 06:50:18 crc kubenswrapper[5048]: I1213 06:50:18.069089 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7888dc9665-xrmff" event={"ID":"ec726365-6964-4e11-942d-d57482573f01","Type":"ContainerStarted","Data":"7f465593c6bd07e6f2cc24f105dea453d3e598178e3de863e5e60cd991605e68"}
Dec 13 06:50:18 crc kubenswrapper[5048]: I1213 06:50:18.082144 5048 generic.go:334] "Generic (PLEG): container finished" podID="a2040734-c367-448b-a7ce-762b168b35c0" containerID="c5f8ffa3ddf51746ecbf0912f6ff78e57d86772929748881e281351d8c2bf856" exitCode=0
Dec 13 06:50:18 crc kubenswrapper[5048]: I1213 06:50:18.082228 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-qhcq5" event={"ID":"a2040734-c367-448b-a7ce-762b168b35c0","Type":"ContainerDied","Data":"c5f8ffa3ddf51746ecbf0912f6ff78e57d86772929748881e281351d8c2bf856"}
Dec 13 06:50:18 crc kubenswrapper[5048]: I1213 06:50:18.105256 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=5.105242459 podStartE2EDuration="5.105242459s" podCreationTimestamp="2025-12-13 06:50:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:50:18.095522973 +0000 UTC m=+1251.962117554" watchObservedRunningTime="2025-12-13 06:50:18.105242459 +0000 UTC m=+1251.971837040"
Dec 13 06:50:18 crc kubenswrapper[5048]: I1213 06:50:18.144276 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-f69kg" event={"ID":"82ebb804-a23f-4cb3-8f2b-2cfda6815108","Type":"ContainerDied","Data":"84bce9e4e0067928fff30784b128bcdc516b0bffdfdeca10fb436d146af261fc"}
Dec 13 06:50:18 crc kubenswrapper[5048]: I1213 06:50:18.144333 5048 scope.go:117] "RemoveContainer" containerID="3dc9c412bd01f6da2a7b3608c820e498c79610cd5a14974425bef813c68ee362"
Dec 13 06:50:18 crc kubenswrapper[5048]: I1213 06:50:18.144513 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-f69kg"
Dec 13 06:50:18 crc kubenswrapper[5048]: I1213 06:50:18.203206 5048 generic.go:334] "Generic (PLEG): container finished" podID="2a87c44c-d2c7-4adf-bdbb-fae9a79b955b" containerID="69d56a5567066958fafe92b1b8664aff26f228b91e5c1972493bb8b42cce91b9" exitCode=0
Dec 13 06:50:18 crc kubenswrapper[5048]: I1213 06:50:18.203317 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b","Type":"ContainerDied","Data":"69d56a5567066958fafe92b1b8664aff26f228b91e5c1972493bb8b42cce91b9"}
Dec 13 06:50:18 crc kubenswrapper[5048]: I1213 06:50:18.210271 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-94459c6fd-6dkbd" event={"ID":"dca6704a-bfa6-42db-9692-f6b21a2c9e08","Type":"ContainerStarted","Data":"847b812a7f833096f6a17b462a4b666d13f5837b643986003d50234e67e497a6"}
Dec 13 06:50:18 crc kubenswrapper[5048]: I1213 06:50:18.210322 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-94459c6fd-6dkbd"
Dec 13 06:50:18 crc kubenswrapper[5048]: I1213 06:50:18.210365 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-94459c6fd-6dkbd"
Dec 13 06:50:18 crc kubenswrapper[5048]: I1213 06:50:18.289051 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-7888dc9665-xrmff" podStartSLOduration=7.613280552 podStartE2EDuration="9.28902764s" podCreationTimestamp="2025-12-13 06:50:09 +0000 UTC" firstStartedPulling="2025-12-13 06:50:15.297784027 +0000 UTC m=+1249.164378608" lastFinishedPulling="2025-12-13 06:50:16.973531115 +0000 UTC m=+1250.840125696" observedRunningTime="2025-12-13 06:50:18.211986915 +0000 UTC m=+1252.078581536" watchObservedRunningTime="2025-12-13 06:50:18.28902764 +0000 UTC m=+1252.155622221"
Dec 13 06:50:18 crc kubenswrapper[5048]: I1213 06:50:18.323980 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-94459c6fd-6dkbd" podStartSLOduration=7.323959435 podStartE2EDuration="7.323959435s" podCreationTimestamp="2025-12-13 06:50:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:50:18.274104832 +0000 UTC m=+1252.140699423" watchObservedRunningTime="2025-12-13 06:50:18.323959435 +0000 UTC m=+1252.190554016"
Dec 13 06:50:18 crc kubenswrapper[5048]: I1213 06:50:18.452683 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-f69kg"]
Dec 13 06:50:18 crc kubenswrapper[5048]: I1213 06:50:18.460209 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-f69kg"]
Dec 13 06:50:18 crc kubenswrapper[5048]: I1213 06:50:18.591452 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82ebb804-a23f-4cb3-8f2b-2cfda6815108" path="/var/lib/kubelet/pods/82ebb804-a23f-4cb3-8f2b-2cfda6815108/volumes"
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.003999 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.114024 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a670fb50-d533-4a39-a9b5-3b350ab332ed-logs\") pod \"a670fb50-d533-4a39-a9b5-3b350ab332ed\" (UID: \"a670fb50-d533-4a39-a9b5-3b350ab332ed\") "
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.114173 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a670fb50-d533-4a39-a9b5-3b350ab332ed-etc-machine-id\") pod \"a670fb50-d533-4a39-a9b5-3b350ab332ed\" (UID: \"a670fb50-d533-4a39-a9b5-3b350ab332ed\") "
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.114197 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a670fb50-d533-4a39-a9b5-3b350ab332ed-config-data\") pod \"a670fb50-d533-4a39-a9b5-3b350ab332ed\" (UID: \"a670fb50-d533-4a39-a9b5-3b350ab332ed\") "
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.114226 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2vw66\" (UniqueName: \"kubernetes.io/projected/a670fb50-d533-4a39-a9b5-3b350ab332ed-kube-api-access-2vw66\") pod \"a670fb50-d533-4a39-a9b5-3b350ab332ed\" (UID: \"a670fb50-d533-4a39-a9b5-3b350ab332ed\") "
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.114244 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a670fb50-d533-4a39-a9b5-3b350ab332ed-combined-ca-bundle\") pod \"a670fb50-d533-4a39-a9b5-3b350ab332ed\" (UID: \"a670fb50-d533-4a39-a9b5-3b350ab332ed\") "
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.114326 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a670fb50-d533-4a39-a9b5-3b350ab332ed-config-data-custom\") pod \"a670fb50-d533-4a39-a9b5-3b350ab332ed\" (UID: \"a670fb50-d533-4a39-a9b5-3b350ab332ed\") "
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.114397 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a670fb50-d533-4a39-a9b5-3b350ab332ed-scripts\") pod \"a670fb50-d533-4a39-a9b5-3b350ab332ed\" (UID: \"a670fb50-d533-4a39-a9b5-3b350ab332ed\") "
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.114605 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a670fb50-d533-4a39-a9b5-3b350ab332ed-logs" (OuterVolumeSpecName: "logs") pod "a670fb50-d533-4a39-a9b5-3b350ab332ed" (UID: "a670fb50-d533-4a39-a9b5-3b350ab332ed"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.114856 5048 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a670fb50-d533-4a39-a9b5-3b350ab332ed-logs\") on node \"crc\" DevicePath \"\""
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.116701 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a670fb50-d533-4a39-a9b5-3b350ab332ed-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "a670fb50-d533-4a39-a9b5-3b350ab332ed" (UID: "a670fb50-d533-4a39-a9b5-3b350ab332ed"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.119380 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a670fb50-d533-4a39-a9b5-3b350ab332ed-scripts" (OuterVolumeSpecName: "scripts") pod "a670fb50-d533-4a39-a9b5-3b350ab332ed" (UID: "a670fb50-d533-4a39-a9b5-3b350ab332ed"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.119586 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a670fb50-d533-4a39-a9b5-3b350ab332ed-kube-api-access-2vw66" (OuterVolumeSpecName: "kube-api-access-2vw66") pod "a670fb50-d533-4a39-a9b5-3b350ab332ed" (UID: "a670fb50-d533-4a39-a9b5-3b350ab332ed"). InnerVolumeSpecName "kube-api-access-2vw66". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.124078 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a670fb50-d533-4a39-a9b5-3b350ab332ed-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "a670fb50-d533-4a39-a9b5-3b350ab332ed" (UID: "a670fb50-d533-4a39-a9b5-3b350ab332ed"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.150553 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a670fb50-d533-4a39-a9b5-3b350ab332ed-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a670fb50-d533-4a39-a9b5-3b350ab332ed" (UID: "a670fb50-d533-4a39-a9b5-3b350ab332ed"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.180349 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a670fb50-d533-4a39-a9b5-3b350ab332ed-config-data" (OuterVolumeSpecName: "config-data") pod "a670fb50-d533-4a39-a9b5-3b350ab332ed" (UID: "a670fb50-d533-4a39-a9b5-3b350ab332ed"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.216902 5048 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a670fb50-d533-4a39-a9b5-3b350ab332ed-etc-machine-id\") on node \"crc\" DevicePath \"\""
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.216939 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a670fb50-d533-4a39-a9b5-3b350ab332ed-config-data\") on node \"crc\" DevicePath \"\""
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.216950 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2vw66\" (UniqueName: \"kubernetes.io/projected/a670fb50-d533-4a39-a9b5-3b350ab332ed-kube-api-access-2vw66\") on node \"crc\" DevicePath \"\""
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.216964 5048 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a670fb50-d533-4a39-a9b5-3b350ab332ed-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.216977 5048 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a670fb50-d533-4a39-a9b5-3b350ab332ed-config-data-custom\") on node \"crc\" DevicePath \"\""
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.216987 5048 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a670fb50-d533-4a39-a9b5-3b350ab332ed-scripts\") on node \"crc\" DevicePath \"\""
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.233067 5048 generic.go:334] "Generic (PLEG): container finished" podID="a670fb50-d533-4a39-a9b5-3b350ab332ed" containerID="c8b8a7ddb2f5ed20ad5d7a6e5b9436f965575d52fe90cd6267a8f8bce880234a" exitCode=0
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.233098 5048 generic.go:334] "Generic (PLEG): container finished" podID="a670fb50-d533-4a39-a9b5-3b350ab332ed" containerID="ad786c4407a27862de7df7c25224a7d8fbb812c7c04d79a22fd92e89f15eca8f" exitCode=143
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.233169 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a670fb50-d533-4a39-a9b5-3b350ab332ed","Type":"ContainerDied","Data":"c8b8a7ddb2f5ed20ad5d7a6e5b9436f965575d52fe90cd6267a8f8bce880234a"}
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.233222 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a670fb50-d533-4a39-a9b5-3b350ab332ed","Type":"ContainerDied","Data":"ad786c4407a27862de7df7c25224a7d8fbb812c7c04d79a22fd92e89f15eca8f"}
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.233235 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a670fb50-d533-4a39-a9b5-3b350ab332ed","Type":"ContainerDied","Data":"90348e84d75c554a94f9a4da23c7ac99999db6a91969dd33ed7761dd2d247ff8"}
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.233253 5048 scope.go:117] "RemoveContainer" containerID="c8b8a7ddb2f5ed20ad5d7a6e5b9436f965575d52fe90cd6267a8f8bce880234a"
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.233368 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.240195 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-5fdc45567b-kg45h"
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.244233 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-qhcq5" event={"ID":"a2040734-c367-448b-a7ce-762b168b35c0","Type":"ContainerStarted","Data":"05fa0bd6ec99bd6dc21bd959fcc82c9750715f411ddc755c2126da1d1d995b25"}
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.245291 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6578955fd5-qhcq5"
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.252491 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7dd9cf6646-xj92w" event={"ID":"ff1df1f6-e2a6-4e70-b352-f3b15b9255d7","Type":"ContainerStarted","Data":"76b50c79522908f2a3bfb5c5168c81e5718b8d6bbccf2fefdcb113efec363b43"}
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.252532 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7dd9cf6646-xj92w" event={"ID":"ff1df1f6-e2a6-4e70-b352-f3b15b9255d7","Type":"ContainerStarted","Data":"ffc9e0f4339e111be5e03377caf3bf2de5f0fe9b78fd4a2b1313cbec45e43234"}
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.259562 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b5edcb36-9ff8-4be5-9891-75151b570837","Type":"ContainerStarted","Data":"152aa3cf0026050ddb4648f5faf2d81e82a9f98df5555d4866abd43f6646003e"}
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.295645 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-7dd9cf6646-xj92w" podStartSLOduration=7.8808623220000005 podStartE2EDuration="10.295625324s" podCreationTimestamp="2025-12-13 06:50:09 +0000 UTC" firstStartedPulling="2025-12-13 06:50:15.820928151 +0000 UTC m=+1249.687522732" lastFinishedPulling="2025-12-13 06:50:18.235691143 +0000 UTC m=+1252.102285734" observedRunningTime="2025-12-13 06:50:19.292474508 +0000 UTC m=+1253.159069089" watchObservedRunningTime="2025-12-13 06:50:19.295625324 +0000 UTC m=+1253.162219905"
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.320762 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6578955fd5-qhcq5" podStartSLOduration=6.320741311 podStartE2EDuration="6.320741311s" podCreationTimestamp="2025-12-13 06:50:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:50:19.317830541 +0000 UTC m=+1253.184425132" watchObservedRunningTime="2025-12-13 06:50:19.320741311 +0000 UTC m=+1253.187335892"
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.339712 5048 scope.go:117] "RemoveContainer" containerID="ad786c4407a27862de7df7c25224a7d8fbb812c7c04d79a22fd92e89f15eca8f"
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.360506 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"]
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.380592 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"]
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.392429 5048 scope.go:117] "RemoveContainer" containerID="c8b8a7ddb2f5ed20ad5d7a6e5b9436f965575d52fe90cd6267a8f8bce880234a"
Dec 13 06:50:19 crc kubenswrapper[5048]: E1213 06:50:19.396356 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c8b8a7ddb2f5ed20ad5d7a6e5b9436f965575d52fe90cd6267a8f8bce880234a\": container with ID starting with c8b8a7ddb2f5ed20ad5d7a6e5b9436f965575d52fe90cd6267a8f8bce880234a not found: ID does not exist" containerID="c8b8a7ddb2f5ed20ad5d7a6e5b9436f965575d52fe90cd6267a8f8bce880234a"
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.396389 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c8b8a7ddb2f5ed20ad5d7a6e5b9436f965575d52fe90cd6267a8f8bce880234a"} err="failed to get container status \"c8b8a7ddb2f5ed20ad5d7a6e5b9436f965575d52fe90cd6267a8f8bce880234a\": rpc error: code = NotFound desc = could not find container \"c8b8a7ddb2f5ed20ad5d7a6e5b9436f965575d52fe90cd6267a8f8bce880234a\": container with ID starting with c8b8a7ddb2f5ed20ad5d7a6e5b9436f965575d52fe90cd6267a8f8bce880234a not found: ID does not exist"
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.396409 5048 scope.go:117] "RemoveContainer" containerID="ad786c4407a27862de7df7c25224a7d8fbb812c7c04d79a22fd92e89f15eca8f"
Dec 13 06:50:19 crc kubenswrapper[5048]: E1213 06:50:19.396677 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ad786c4407a27862de7df7c25224a7d8fbb812c7c04d79a22fd92e89f15eca8f\": container with ID starting with ad786c4407a27862de7df7c25224a7d8fbb812c7c04d79a22fd92e89f15eca8f not found: ID does not exist" containerID="ad786c4407a27862de7df7c25224a7d8fbb812c7c04d79a22fd92e89f15eca8f"
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.396693 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ad786c4407a27862de7df7c25224a7d8fbb812c7c04d79a22fd92e89f15eca8f"} err="failed to get container status \"ad786c4407a27862de7df7c25224a7d8fbb812c7c04d79a22fd92e89f15eca8f\": rpc error: code = NotFound desc = could not find container \"ad786c4407a27862de7df7c25224a7d8fbb812c7c04d79a22fd92e89f15eca8f\": container with ID starting with ad786c4407a27862de7df7c25224a7d8fbb812c7c04d79a22fd92e89f15eca8f not found: ID does not exist"
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.396705 5048 scope.go:117] "RemoveContainer" containerID="c8b8a7ddb2f5ed20ad5d7a6e5b9436f965575d52fe90cd6267a8f8bce880234a"
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.396905 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c8b8a7ddb2f5ed20ad5d7a6e5b9436f965575d52fe90cd6267a8f8bce880234a"} err="failed to get container status \"c8b8a7ddb2f5ed20ad5d7a6e5b9436f965575d52fe90cd6267a8f8bce880234a\": rpc error: code = NotFound desc = could not find container \"c8b8a7ddb2f5ed20ad5d7a6e5b9436f965575d52fe90cd6267a8f8bce880234a\": container with ID starting with c8b8a7ddb2f5ed20ad5d7a6e5b9436f965575d52fe90cd6267a8f8bce880234a not found: ID does not exist"
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.396919 5048 scope.go:117] "RemoveContainer" containerID="ad786c4407a27862de7df7c25224a7d8fbb812c7c04d79a22fd92e89f15eca8f"
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.407013 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ad786c4407a27862de7df7c25224a7d8fbb812c7c04d79a22fd92e89f15eca8f"} err="failed to get container status \"ad786c4407a27862de7df7c25224a7d8fbb812c7c04d79a22fd92e89f15eca8f\": rpc error: code = NotFound desc = could not find container \"ad786c4407a27862de7df7c25224a7d8fbb812c7c04d79a22fd92e89f15eca8f\": container with ID starting with ad786c4407a27862de7df7c25224a7d8fbb812c7c04d79a22fd92e89f15eca8f not found: ID does not exist"
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.415982 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"]
Dec 13 06:50:19 crc kubenswrapper[5048]: E1213 06:50:19.416387 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a670fb50-d533-4a39-a9b5-3b350ab332ed" containerName="cinder-api"
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.416406 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="a670fb50-d533-4a39-a9b5-3b350ab332ed" containerName="cinder-api"
Dec 13 06:50:19 crc kubenswrapper[5048]: E1213 06:50:19.416417 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a670fb50-d533-4a39-a9b5-3b350ab332ed" containerName="cinder-api-log"
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.416424 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="a670fb50-d533-4a39-a9b5-3b350ab332ed" containerName="cinder-api-log"
Dec 13 06:50:19 crc kubenswrapper[5048]: E1213 06:50:19.416459 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82ebb804-a23f-4cb3-8f2b-2cfda6815108" containerName="init"
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.416466 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="82ebb804-a23f-4cb3-8f2b-2cfda6815108" containerName="init"
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.416670 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="a670fb50-d533-4a39-a9b5-3b350ab332ed" containerName="cinder-api-log"
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.416688 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="82ebb804-a23f-4cb3-8f2b-2cfda6815108" containerName="init"
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.416699 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="a670fb50-d533-4a39-a9b5-3b350ab332ed" containerName="cinder-api"
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.418656 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.422462 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc"
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.423675 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data"
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.423902 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc"
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.427319 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.446291 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-657fc95f76-vznd4"
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.523627 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2341822f-d44f-47a2-a543-655dc0b26866-etc-machine-id\") pod \"cinder-api-0\" (UID: \"2341822f-d44f-47a2-a543-655dc0b26866\") " pod="openstack/cinder-api-0"
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.523702 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rcnnv\" (UniqueName: \"kubernetes.io/projected/2341822f-d44f-47a2-a543-655dc0b26866-kube-api-access-rcnnv\") pod \"cinder-api-0\" (UID: \"2341822f-d44f-47a2-a543-655dc0b26866\") " pod="openstack/cinder-api-0"
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.524196 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2341822f-d44f-47a2-a543-655dc0b26866-scripts\") pod \"cinder-api-0\" (UID: \"2341822f-d44f-47a2-a543-655dc0b26866\") " pod="openstack/cinder-api-0"
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.524381 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2341822f-d44f-47a2-a543-655dc0b26866-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"2341822f-d44f-47a2-a543-655dc0b26866\") " pod="openstack/cinder-api-0"
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.524577 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2341822f-d44f-47a2-a543-655dc0b26866-public-tls-certs\") pod \"cinder-api-0\" (UID: \"2341822f-d44f-47a2-a543-655dc0b26866\") " pod="openstack/cinder-api-0"
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.524648 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2341822f-d44f-47a2-a543-655dc0b26866-config-data\") pod \"cinder-api-0\" (UID: \"2341822f-d44f-47a2-a543-655dc0b26866\") " pod="openstack/cinder-api-0"
Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.524677 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2341822f-d44f-47a2-a543-655dc0b26866-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"2341822f-d44f-47a2-a543-655dc0b26866\") " pod="openstack/cinder-api-0"
Dec 13 06:50:19 crc kubenswrapper[5048]:
I1213 06:50:19.524776 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2341822f-d44f-47a2-a543-655dc0b26866-config-data-custom\") pod \"cinder-api-0\" (UID: \"2341822f-d44f-47a2-a543-655dc0b26866\") " pod="openstack/cinder-api-0" Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.524805 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2341822f-d44f-47a2-a543-655dc0b26866-logs\") pod \"cinder-api-0\" (UID: \"2341822f-d44f-47a2-a543-655dc0b26866\") " pod="openstack/cinder-api-0" Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.555020 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-7b89f46c6-sjtfz" Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.626543 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2341822f-d44f-47a2-a543-655dc0b26866-public-tls-certs\") pod \"cinder-api-0\" (UID: \"2341822f-d44f-47a2-a543-655dc0b26866\") " pod="openstack/cinder-api-0" Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.626596 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2341822f-d44f-47a2-a543-655dc0b26866-config-data\") pod \"cinder-api-0\" (UID: \"2341822f-d44f-47a2-a543-655dc0b26866\") " pod="openstack/cinder-api-0" Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.626620 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2341822f-d44f-47a2-a543-655dc0b26866-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"2341822f-d44f-47a2-a543-655dc0b26866\") " pod="openstack/cinder-api-0" Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.626689 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2341822f-d44f-47a2-a543-655dc0b26866-config-data-custom\") pod \"cinder-api-0\" (UID: \"2341822f-d44f-47a2-a543-655dc0b26866\") " pod="openstack/cinder-api-0" Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.626710 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2341822f-d44f-47a2-a543-655dc0b26866-logs\") pod \"cinder-api-0\" (UID: \"2341822f-d44f-47a2-a543-655dc0b26866\") " pod="openstack/cinder-api-0" Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.626835 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2341822f-d44f-47a2-a543-655dc0b26866-etc-machine-id\") pod \"cinder-api-0\" (UID: \"2341822f-d44f-47a2-a543-655dc0b26866\") " pod="openstack/cinder-api-0" Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.626862 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rcnnv\" (UniqueName: \"kubernetes.io/projected/2341822f-d44f-47a2-a543-655dc0b26866-kube-api-access-rcnnv\") pod \"cinder-api-0\" (UID: \"2341822f-d44f-47a2-a543-655dc0b26866\") " pod="openstack/cinder-api-0" Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.626891 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/2341822f-d44f-47a2-a543-655dc0b26866-scripts\") pod \"cinder-api-0\" (UID: \"2341822f-d44f-47a2-a543-655dc0b26866\") " pod="openstack/cinder-api-0" Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.626970 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2341822f-d44f-47a2-a543-655dc0b26866-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"2341822f-d44f-47a2-a543-655dc0b26866\") " pod="openstack/cinder-api-0" Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.627828 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2341822f-d44f-47a2-a543-655dc0b26866-logs\") pod \"cinder-api-0\" (UID: \"2341822f-d44f-47a2-a543-655dc0b26866\") " pod="openstack/cinder-api-0" Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.627913 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2341822f-d44f-47a2-a543-655dc0b26866-etc-machine-id\") pod \"cinder-api-0\" (UID: \"2341822f-d44f-47a2-a543-655dc0b26866\") " pod="openstack/cinder-api-0" Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.633935 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2341822f-d44f-47a2-a543-655dc0b26866-config-data-custom\") pod \"cinder-api-0\" (UID: \"2341822f-d44f-47a2-a543-655dc0b26866\") " pod="openstack/cinder-api-0" Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.644536 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2341822f-d44f-47a2-a543-655dc0b26866-public-tls-certs\") pod \"cinder-api-0\" (UID: \"2341822f-d44f-47a2-a543-655dc0b26866\") " pod="openstack/cinder-api-0" Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.647582 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2341822f-d44f-47a2-a543-655dc0b26866-scripts\") pod \"cinder-api-0\" (UID: \"2341822f-d44f-47a2-a543-655dc0b26866\") " pod="openstack/cinder-api-0" Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.648512 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2341822f-d44f-47a2-a543-655dc0b26866-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"2341822f-d44f-47a2-a543-655dc0b26866\") " pod="openstack/cinder-api-0" Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.648600 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2341822f-d44f-47a2-a543-655dc0b26866-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"2341822f-d44f-47a2-a543-655dc0b26866\") " pod="openstack/cinder-api-0" Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.649904 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rcnnv\" (UniqueName: \"kubernetes.io/projected/2341822f-d44f-47a2-a543-655dc0b26866-kube-api-access-rcnnv\") pod \"cinder-api-0\" (UID: \"2341822f-d44f-47a2-a543-655dc0b26866\") " pod="openstack/cinder-api-0" Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.678076 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/2341822f-d44f-47a2-a543-655dc0b26866-config-data\") pod \"cinder-api-0\" (UID: \"2341822f-d44f-47a2-a543-655dc0b26866\") " pod="openstack/cinder-api-0" Dec 13 06:50:19 crc kubenswrapper[5048]: I1213 06:50:19.741770 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Dec 13 06:50:20 crc kubenswrapper[5048]: W1213 06:50:20.254531 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2341822f_d44f_47a2_a543_655dc0b26866.slice/crio-8b6682f15b155f07971913f160ce139276afdea880753e1cc08e541b1e594124 WatchSource:0}: Error finding container 8b6682f15b155f07971913f160ce139276afdea880753e1cc08e541b1e594124: Status 404 returned error can't find the container with id 8b6682f15b155f07971913f160ce139276afdea880753e1cc08e541b1e594124 Dec 13 06:50:20 crc kubenswrapper[5048]: I1213 06:50:20.258187 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Dec 13 06:50:20 crc kubenswrapper[5048]: I1213 06:50:20.316407 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2341822f-d44f-47a2-a543-655dc0b26866","Type":"ContainerStarted","Data":"8b6682f15b155f07971913f160ce139276afdea880753e1cc08e541b1e594124"} Dec 13 06:50:20 crc kubenswrapper[5048]: I1213 06:50:20.318282 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b5edcb36-9ff8-4be5-9891-75151b570837","Type":"ContainerStarted","Data":"d4642ca362d8f712f07fb21488d8ea0db126279cce9419e173ec1b1e99e6ffb0"} Dec 13 06:50:20 crc kubenswrapper[5048]: I1213 06:50:20.358909 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=6.142004547 podStartE2EDuration="7.358891537s" podCreationTimestamp="2025-12-13 06:50:13 +0000 UTC" firstStartedPulling="2025-12-13 06:50:16.139858476 +0000 UTC m=+1250.006453057" lastFinishedPulling="2025-12-13 06:50:17.356745466 +0000 UTC m=+1251.223340047" observedRunningTime="2025-12-13 06:50:20.341139321 +0000 UTC m=+1254.207733922" watchObservedRunningTime="2025-12-13 06:50:20.358891537 +0000 UTC m=+1254.225486118" Dec 13 06:50:20 crc kubenswrapper[5048]: I1213 06:50:20.585248 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a670fb50-d533-4a39-a9b5-3b350ab332ed" path="/var/lib/kubelet/pods/a670fb50-d533-4a39-a9b5-3b350ab332ed/volumes" Dec 13 06:50:21 crc kubenswrapper[5048]: I1213 06:50:21.341209 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2341822f-d44f-47a2-a543-655dc0b26866","Type":"ContainerStarted","Data":"63b16d56772caf69893938ffef5ff4a0fdd379232c567434f30196f11118d000"} Dec 13 06:50:21 crc kubenswrapper[5048]: I1213 06:50:21.427490 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-5fdc45567b-kg45h" Dec 13 06:50:21 crc kubenswrapper[5048]: I1213 06:50:21.517224 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-657fc95f76-vznd4" Dec 13 06:50:21 crc kubenswrapper[5048]: I1213 06:50:21.600665 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5fdc45567b-kg45h"] Dec 13 06:50:22 crc kubenswrapper[5048]: I1213 06:50:22.350846 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" 
event={"ID":"2341822f-d44f-47a2-a543-655dc0b26866","Type":"ContainerStarted","Data":"c2951254dedf6a835d06ae4892a9b3a3ec2e1b2cf547d105b143ae63ebebe08a"} Dec 13 06:50:22 crc kubenswrapper[5048]: I1213 06:50:22.350971 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-5fdc45567b-kg45h" podUID="3c13197f-14c3-45a9-ba9c-bc89b80d6169" containerName="horizon-log" containerID="cri-o://97618be3b26b3b31a7f8fcb9a3ed26dd8767012b12160a1b25cdadbb069ffb87" gracePeriod=30 Dec 13 06:50:22 crc kubenswrapper[5048]: I1213 06:50:22.351062 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-5fdc45567b-kg45h" podUID="3c13197f-14c3-45a9-ba9c-bc89b80d6169" containerName="horizon" containerID="cri-o://a4c525fef07828f178bd4736adb2426b714431b369912c1d75d9b97962f6a924" gracePeriod=30 Dec 13 06:50:22 crc kubenswrapper[5048]: I1213 06:50:22.381045 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.381023949 podStartE2EDuration="3.381023949s" podCreationTimestamp="2025-12-13 06:50:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:50:22.374365078 +0000 UTC m=+1256.240959669" watchObservedRunningTime="2025-12-13 06:50:22.381023949 +0000 UTC m=+1256.247618530" Dec 13 06:50:23 crc kubenswrapper[5048]: I1213 06:50:23.361024 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Dec 13 06:50:23 crc kubenswrapper[5048]: I1213 06:50:23.580723 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6578955fd5-qhcq5" Dec 13 06:50:23 crc kubenswrapper[5048]: I1213 06:50:23.666276 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-k4rmf"] Dec 13 06:50:23 crc kubenswrapper[5048]: I1213 06:50:23.666771 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6b7b667979-k4rmf" podUID="830ced8e-31e0-474d-a13e-93d1825c2c8f" containerName="dnsmasq-dns" containerID="cri-o://1131c364d46f9f2732f0aa2dcd63b075c6fbc4207d686609f0cdfcc6f2a42f3f" gracePeriod=10 Dec 13 06:50:23 crc kubenswrapper[5048]: I1213 06:50:23.689819 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-6465fd554f-k9lkr" Dec 13 06:50:23 crc kubenswrapper[5048]: I1213 06:50:23.724325 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Dec 13 06:50:23 crc kubenswrapper[5048]: I1213 06:50:23.760147 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-7b89f46c6-sjtfz"] Dec 13 06:50:23 crc kubenswrapper[5048]: I1213 06:50:23.760394 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-7b89f46c6-sjtfz" podUID="a6dbb4d5-4873-4bd5-a300-6bf8334f14db" containerName="neutron-api" containerID="cri-o://831a70e5ee0f1376097aa2d55a51702cb5c665cde4634627f05d7c49649253ad" gracePeriod=30 Dec 13 06:50:23 crc kubenswrapper[5048]: I1213 06:50:23.760729 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-7b89f46c6-sjtfz" podUID="a6dbb4d5-4873-4bd5-a300-6bf8334f14db" containerName="neutron-httpd" containerID="cri-o://d958edd44a5c7ad8c726d4d8fcc4015eac30080837930136ff0737d4c8849b05" gracePeriod=30 Dec 13 06:50:23 crc kubenswrapper[5048]: I1213 
06:50:23.809678 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-94459c6fd-6dkbd" Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.044122 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-94459c6fd-6dkbd" Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.129662 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-648c8d65dd-tpz2s"] Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.141173 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-648c8d65dd-tpz2s" podUID="0fb59ccf-cf8a-47f3-ad52-283c568bfb44" containerName="barbican-api-log" containerID="cri-o://1899709674bb106a0c0bd4e1d5e157617b213223d0bacf552e443a8af15566fe" gracePeriod=30 Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.142195 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-648c8d65dd-tpz2s" podUID="0fb59ccf-cf8a-47f3-ad52-283c568bfb44" containerName="barbican-api" containerID="cri-o://a600dd9724e454bf8a954ff8cf408d3f05833bb1b1e2f23754f8dc6a2632dde5" gracePeriod=30 Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.325619 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-648c8d65dd-tpz2s" podUID="0fb59ccf-cf8a-47f3-ad52-283c568bfb44" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.158:9311/healthcheck\": EOF" Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.325688 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-648c8d65dd-tpz2s" podUID="0fb59ccf-cf8a-47f3-ad52-283c568bfb44" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.158:9311/healthcheck\": EOF" Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.432700 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.440078 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b7b667979-k4rmf" Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.454104 5048 generic.go:334] "Generic (PLEG): container finished" podID="830ced8e-31e0-474d-a13e-93d1825c2c8f" containerID="1131c364d46f9f2732f0aa2dcd63b075c6fbc4207d686609f0cdfcc6f2a42f3f" exitCode=0 Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.454185 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-k4rmf" event={"ID":"830ced8e-31e0-474d-a13e-93d1825c2c8f","Type":"ContainerDied","Data":"1131c364d46f9f2732f0aa2dcd63b075c6fbc4207d686609f0cdfcc6f2a42f3f"} Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.454212 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-k4rmf" event={"ID":"830ced8e-31e0-474d-a13e-93d1825c2c8f","Type":"ContainerDied","Data":"9189d8a81944443aa457792af83d6b296a46e60af5d34d1c17ccd4248db81eda"} Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.454227 5048 scope.go:117] "RemoveContainer" containerID="1131c364d46f9f2732f0aa2dcd63b075c6fbc4207d686609f0cdfcc6f2a42f3f" Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.469305 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/830ced8e-31e0-474d-a13e-93d1825c2c8f-ovsdbserver-sb\") pod \"830ced8e-31e0-474d-a13e-93d1825c2c8f\" (UID: \"830ced8e-31e0-474d-a13e-93d1825c2c8f\") " Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.469385 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/830ced8e-31e0-474d-a13e-93d1825c2c8f-dns-swift-storage-0\") pod \"830ced8e-31e0-474d-a13e-93d1825c2c8f\" (UID: \"830ced8e-31e0-474d-a13e-93d1825c2c8f\") " Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.469408 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/830ced8e-31e0-474d-a13e-93d1825c2c8f-dns-svc\") pod \"830ced8e-31e0-474d-a13e-93d1825c2c8f\" (UID: \"830ced8e-31e0-474d-a13e-93d1825c2c8f\") " Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.498705 5048 generic.go:334] "Generic (PLEG): container finished" podID="0fb59ccf-cf8a-47f3-ad52-283c568bfb44" containerID="1899709674bb106a0c0bd4e1d5e157617b213223d0bacf552e443a8af15566fe" exitCode=143 Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.498796 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-648c8d65dd-tpz2s" event={"ID":"0fb59ccf-cf8a-47f3-ad52-283c568bfb44","Type":"ContainerDied","Data":"1899709674bb106a0c0bd4e1d5e157617b213223d0bacf552e443a8af15566fe"} Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.507611 5048 generic.go:334] "Generic (PLEG): container finished" podID="a6dbb4d5-4873-4bd5-a300-6bf8334f14db" containerID="d958edd44a5c7ad8c726d4d8fcc4015eac30080837930136ff0737d4c8849b05" exitCode=0 Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.508557 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7b89f46c6-sjtfz" event={"ID":"a6dbb4d5-4873-4bd5-a300-6bf8334f14db","Type":"ContainerDied","Data":"d958edd44a5c7ad8c726d4d8fcc4015eac30080837930136ff0737d4c8849b05"} Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.575315 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/830ced8e-31e0-474d-a13e-93d1825c2c8f-config\") pod \"830ced8e-31e0-474d-a13e-93d1825c2c8f\" (UID: \"830ced8e-31e0-474d-a13e-93d1825c2c8f\") " Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.581510 5048 scope.go:117] "RemoveContainer" containerID="5ef04b3283502adc5b482e11faccbf5090cd26fcbece944aefc1b0b0ff7bae6f" Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.584552 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w65ns\" (UniqueName: \"kubernetes.io/projected/830ced8e-31e0-474d-a13e-93d1825c2c8f-kube-api-access-w65ns\") pod \"830ced8e-31e0-474d-a13e-93d1825c2c8f\" (UID: \"830ced8e-31e0-474d-a13e-93d1825c2c8f\") " Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.584601 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/830ced8e-31e0-474d-a13e-93d1825c2c8f-ovsdbserver-nb\") pod \"830ced8e-31e0-474d-a13e-93d1825c2c8f\" (UID: \"830ced8e-31e0-474d-a13e-93d1825c2c8f\") " Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.606188 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/830ced8e-31e0-474d-a13e-93d1825c2c8f-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "830ced8e-31e0-474d-a13e-93d1825c2c8f" (UID: "830ced8e-31e0-474d-a13e-93d1825c2c8f"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.619676 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/830ced8e-31e0-474d-a13e-93d1825c2c8f-kube-api-access-w65ns" (OuterVolumeSpecName: "kube-api-access-w65ns") pod "830ced8e-31e0-474d-a13e-93d1825c2c8f" (UID: "830ced8e-31e0-474d-a13e-93d1825c2c8f"). InnerVolumeSpecName "kube-api-access-w65ns". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.626823 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.636074 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/830ced8e-31e0-474d-a13e-93d1825c2c8f-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "830ced8e-31e0-474d-a13e-93d1825c2c8f" (UID: "830ced8e-31e0-474d-a13e-93d1825c2c8f"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.651592 5048 scope.go:117] "RemoveContainer" containerID="1131c364d46f9f2732f0aa2dcd63b075c6fbc4207d686609f0cdfcc6f2a42f3f" Dec 13 06:50:24 crc kubenswrapper[5048]: E1213 06:50:24.652039 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1131c364d46f9f2732f0aa2dcd63b075c6fbc4207d686609f0cdfcc6f2a42f3f\": container with ID starting with 1131c364d46f9f2732f0aa2dcd63b075c6fbc4207d686609f0cdfcc6f2a42f3f not found: ID does not exist" containerID="1131c364d46f9f2732f0aa2dcd63b075c6fbc4207d686609f0cdfcc6f2a42f3f" Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.652070 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1131c364d46f9f2732f0aa2dcd63b075c6fbc4207d686609f0cdfcc6f2a42f3f"} err="failed to get container status \"1131c364d46f9f2732f0aa2dcd63b075c6fbc4207d686609f0cdfcc6f2a42f3f\": rpc error: code = NotFound desc = could not find container \"1131c364d46f9f2732f0aa2dcd63b075c6fbc4207d686609f0cdfcc6f2a42f3f\": container with ID starting with 1131c364d46f9f2732f0aa2dcd63b075c6fbc4207d686609f0cdfcc6f2a42f3f not found: ID does not exist" Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.652090 5048 scope.go:117] "RemoveContainer" containerID="5ef04b3283502adc5b482e11faccbf5090cd26fcbece944aefc1b0b0ff7bae6f" Dec 13 06:50:24 crc kubenswrapper[5048]: E1213 06:50:24.652266 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5ef04b3283502adc5b482e11faccbf5090cd26fcbece944aefc1b0b0ff7bae6f\": container with ID starting with 5ef04b3283502adc5b482e11faccbf5090cd26fcbece944aefc1b0b0ff7bae6f not found: ID does not exist" containerID="5ef04b3283502adc5b482e11faccbf5090cd26fcbece944aefc1b0b0ff7bae6f" Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.652288 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ef04b3283502adc5b482e11faccbf5090cd26fcbece944aefc1b0b0ff7bae6f"} err="failed to get container status \"5ef04b3283502adc5b482e11faccbf5090cd26fcbece944aefc1b0b0ff7bae6f\": rpc error: code = NotFound desc = could not find container \"5ef04b3283502adc5b482e11faccbf5090cd26fcbece944aefc1b0b0ff7bae6f\": container with ID starting with 5ef04b3283502adc5b482e11faccbf5090cd26fcbece944aefc1b0b0ff7bae6f not found: ID does not exist" Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.653874 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/830ced8e-31e0-474d-a13e-93d1825c2c8f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "830ced8e-31e0-474d-a13e-93d1825c2c8f" (UID: "830ced8e-31e0-474d-a13e-93d1825c2c8f"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.670278 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/830ced8e-31e0-474d-a13e-93d1825c2c8f-config" (OuterVolumeSpecName: "config") pod "830ced8e-31e0-474d-a13e-93d1825c2c8f" (UID: "830ced8e-31e0-474d-a13e-93d1825c2c8f"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.677952 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/830ced8e-31e0-474d-a13e-93d1825c2c8f-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "830ced8e-31e0-474d-a13e-93d1825c2c8f" (UID: "830ced8e-31e0-474d-a13e-93d1825c2c8f"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.694996 5048 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/830ced8e-31e0-474d-a13e-93d1825c2c8f-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.695025 5048 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/830ced8e-31e0-474d-a13e-93d1825c2c8f-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.695039 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w65ns\" (UniqueName: \"kubernetes.io/projected/830ced8e-31e0-474d-a13e-93d1825c2c8f-kube-api-access-w65ns\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.695050 5048 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/830ced8e-31e0-474d-a13e-93d1825c2c8f-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.695060 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/830ced8e-31e0-474d-a13e-93d1825c2c8f-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:24 crc kubenswrapper[5048]: I1213 06:50:24.695068 5048 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/830ced8e-31e0-474d-a13e-93d1825c2c8f-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:24 crc kubenswrapper[5048]: E1213 06:50:24.710626 5048 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0fb59ccf_cf8a_47f3_ad52_283c568bfb44.slice/crio-conmon-1899709674bb106a0c0bd4e1d5e157617b213223d0bacf552e443a8af15566fe.scope\": RecentStats: unable to find data in memory cache]" Dec 13 06:50:25 crc kubenswrapper[5048]: I1213 06:50:25.082811 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-7b9cc68dcb-22pkt" Dec 13 06:50:25 crc kubenswrapper[5048]: I1213 06:50:25.516544 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="b5edcb36-9ff8-4be5-9891-75151b570837" containerName="cinder-scheduler" containerID="cri-o://152aa3cf0026050ddb4648f5faf2d81e82a9f98df5555d4866abd43f6646003e" gracePeriod=30 Dec 13 06:50:25 crc kubenswrapper[5048]: I1213 06:50:25.516804 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b7b667979-k4rmf" Dec 13 06:50:25 crc kubenswrapper[5048]: I1213 06:50:25.517163 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="b5edcb36-9ff8-4be5-9891-75151b570837" containerName="probe" containerID="cri-o://d4642ca362d8f712f07fb21488d8ea0db126279cce9419e173ec1b1e99e6ffb0" gracePeriod=30 Dec 13 06:50:25 crc kubenswrapper[5048]: I1213 06:50:25.553733 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-k4rmf"] Dec 13 06:50:25 crc kubenswrapper[5048]: I1213 06:50:25.570476 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-k4rmf"] Dec 13 06:50:26 crc kubenswrapper[5048]: I1213 06:50:26.525048 5048 generic.go:334] "Generic (PLEG): container finished" podID="3c13197f-14c3-45a9-ba9c-bc89b80d6169" containerID="a4c525fef07828f178bd4736adb2426b714431b369912c1d75d9b97962f6a924" exitCode=0 Dec 13 06:50:26 crc kubenswrapper[5048]: I1213 06:50:26.525095 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fdc45567b-kg45h" event={"ID":"3c13197f-14c3-45a9-ba9c-bc89b80d6169","Type":"ContainerDied","Data":"a4c525fef07828f178bd4736adb2426b714431b369912c1d75d9b97962f6a924"} Dec 13 06:50:26 crc kubenswrapper[5048]: I1213 06:50:26.578154 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="830ced8e-31e0-474d-a13e-93d1825c2c8f" path="/var/lib/kubelet/pods/830ced8e-31e0-474d-a13e-93d1825c2c8f/volumes" Dec 13 06:50:26 crc kubenswrapper[5048]: I1213 06:50:26.792392 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-5fdc45567b-kg45h" podUID="3c13197f-14c3-45a9-ba9c-bc89b80d6169" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.145:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.145:8443: connect: connection refused" Dec 13 06:50:27 crc kubenswrapper[5048]: I1213 06:50:27.534194 5048 generic.go:334] "Generic (PLEG): container finished" podID="b5edcb36-9ff8-4be5-9891-75151b570837" containerID="d4642ca362d8f712f07fb21488d8ea0db126279cce9419e173ec1b1e99e6ffb0" exitCode=0 Dec 13 06:50:27 crc kubenswrapper[5048]: I1213 06:50:27.534394 5048 generic.go:334] "Generic (PLEG): container finished" podID="b5edcb36-9ff8-4be5-9891-75151b570837" containerID="152aa3cf0026050ddb4648f5faf2d81e82a9f98df5555d4866abd43f6646003e" exitCode=0 Dec 13 06:50:27 crc kubenswrapper[5048]: I1213 06:50:27.534413 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b5edcb36-9ff8-4be5-9891-75151b570837","Type":"ContainerDied","Data":"d4642ca362d8f712f07fb21488d8ea0db126279cce9419e173ec1b1e99e6ffb0"} Dec 13 06:50:27 crc kubenswrapper[5048]: I1213 06:50:27.534482 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b5edcb36-9ff8-4be5-9891-75151b570837","Type":"ContainerDied","Data":"152aa3cf0026050ddb4648f5faf2d81e82a9f98df5555d4866abd43f6646003e"} Dec 13 06:50:27 crc kubenswrapper[5048]: I1213 06:50:27.942446 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.080521 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b5edcb36-9ff8-4be5-9891-75151b570837-etc-machine-id\") pod \"b5edcb36-9ff8-4be5-9891-75151b570837\" (UID: \"b5edcb36-9ff8-4be5-9891-75151b570837\") " Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.080700 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b5edcb36-9ff8-4be5-9891-75151b570837-scripts\") pod \"b5edcb36-9ff8-4be5-9891-75151b570837\" (UID: \"b5edcb36-9ff8-4be5-9891-75151b570837\") " Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.080781 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b5edcb36-9ff8-4be5-9891-75151b570837-config-data-custom\") pod \"b5edcb36-9ff8-4be5-9891-75151b570837\" (UID: \"b5edcb36-9ff8-4be5-9891-75151b570837\") " Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.080920 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b5edcb36-9ff8-4be5-9891-75151b570837-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "b5edcb36-9ff8-4be5-9891-75151b570837" (UID: "b5edcb36-9ff8-4be5-9891-75151b570837"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.081092 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p484c\" (UniqueName: \"kubernetes.io/projected/b5edcb36-9ff8-4be5-9891-75151b570837-kube-api-access-p484c\") pod \"b5edcb36-9ff8-4be5-9891-75151b570837\" (UID: \"b5edcb36-9ff8-4be5-9891-75151b570837\") " Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.081220 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5edcb36-9ff8-4be5-9891-75151b570837-config-data\") pod \"b5edcb36-9ff8-4be5-9891-75151b570837\" (UID: \"b5edcb36-9ff8-4be5-9891-75151b570837\") " Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.081347 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5edcb36-9ff8-4be5-9891-75151b570837-combined-ca-bundle\") pod \"b5edcb36-9ff8-4be5-9891-75151b570837\" (UID: \"b5edcb36-9ff8-4be5-9891-75151b570837\") " Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.082591 5048 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b5edcb36-9ff8-4be5-9891-75151b570837-etc-machine-id\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.089045 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5edcb36-9ff8-4be5-9891-75151b570837-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "b5edcb36-9ff8-4be5-9891-75151b570837" (UID: "b5edcb36-9ff8-4be5-9891-75151b570837"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.089370 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5edcb36-9ff8-4be5-9891-75151b570837-kube-api-access-p484c" (OuterVolumeSpecName: "kube-api-access-p484c") pod "b5edcb36-9ff8-4be5-9891-75151b570837" (UID: "b5edcb36-9ff8-4be5-9891-75151b570837"). InnerVolumeSpecName "kube-api-access-p484c". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.105867 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5edcb36-9ff8-4be5-9891-75151b570837-scripts" (OuterVolumeSpecName: "scripts") pod "b5edcb36-9ff8-4be5-9891-75151b570837" (UID: "b5edcb36-9ff8-4be5-9891-75151b570837"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.184809 5048 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b5edcb36-9ff8-4be5-9891-75151b570837-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.184853 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p484c\" (UniqueName: \"kubernetes.io/projected/b5edcb36-9ff8-4be5-9891-75151b570837-kube-api-access-p484c\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.184867 5048 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b5edcb36-9ff8-4be5-9891-75151b570837-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.240365 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5edcb36-9ff8-4be5-9891-75151b570837-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b5edcb36-9ff8-4be5-9891-75151b570837" (UID: "b5edcb36-9ff8-4be5-9891-75151b570837"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.267531 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5edcb36-9ff8-4be5-9891-75151b570837-config-data" (OuterVolumeSpecName: "config-data") pod "b5edcb36-9ff8-4be5-9891-75151b570837" (UID: "b5edcb36-9ff8-4be5-9891-75151b570837"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.292112 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5edcb36-9ff8-4be5-9891-75151b570837-config-data\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.292155 5048 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5edcb36-9ff8-4be5-9891-75151b570837-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.323288 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7b89f46c6-sjtfz" Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.393238 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a6dbb4d5-4873-4bd5-a300-6bf8334f14db-httpd-config\") pod \"a6dbb4d5-4873-4bd5-a300-6bf8334f14db\" (UID: \"a6dbb4d5-4873-4bd5-a300-6bf8334f14db\") " Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.393427 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a6dbb4d5-4873-4bd5-a300-6bf8334f14db-config\") pod \"a6dbb4d5-4873-4bd5-a300-6bf8334f14db\" (UID: \"a6dbb4d5-4873-4bd5-a300-6bf8334f14db\") " Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.393480 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fp4dj\" (UniqueName: \"kubernetes.io/projected/a6dbb4d5-4873-4bd5-a300-6bf8334f14db-kube-api-access-fp4dj\") pod \"a6dbb4d5-4873-4bd5-a300-6bf8334f14db\" (UID: \"a6dbb4d5-4873-4bd5-a300-6bf8334f14db\") " Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.393508 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a6dbb4d5-4873-4bd5-a300-6bf8334f14db-ovndb-tls-certs\") pod \"a6dbb4d5-4873-4bd5-a300-6bf8334f14db\" (UID: \"a6dbb4d5-4873-4bd5-a300-6bf8334f14db\") " Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.393562 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6dbb4d5-4873-4bd5-a300-6bf8334f14db-combined-ca-bundle\") pod \"a6dbb4d5-4873-4bd5-a300-6bf8334f14db\" (UID: \"a6dbb4d5-4873-4bd5-a300-6bf8334f14db\") " Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.400847 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6dbb4d5-4873-4bd5-a300-6bf8334f14db-kube-api-access-fp4dj" (OuterVolumeSpecName: "kube-api-access-fp4dj") pod "a6dbb4d5-4873-4bd5-a300-6bf8334f14db" (UID: "a6dbb4d5-4873-4bd5-a300-6bf8334f14db"). InnerVolumeSpecName "kube-api-access-fp4dj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.402751 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6dbb4d5-4873-4bd5-a300-6bf8334f14db-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "a6dbb4d5-4873-4bd5-a300-6bf8334f14db" (UID: "a6dbb4d5-4873-4bd5-a300-6bf8334f14db"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.442721 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6dbb4d5-4873-4bd5-a300-6bf8334f14db-config" (OuterVolumeSpecName: "config") pod "a6dbb4d5-4873-4bd5-a300-6bf8334f14db" (UID: "a6dbb4d5-4873-4bd5-a300-6bf8334f14db"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.445673 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6dbb4d5-4873-4bd5-a300-6bf8334f14db-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a6dbb4d5-4873-4bd5-a300-6bf8334f14db" (UID: "a6dbb4d5-4873-4bd5-a300-6bf8334f14db"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.476708 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6dbb4d5-4873-4bd5-a300-6bf8334f14db-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "a6dbb4d5-4873-4bd5-a300-6bf8334f14db" (UID: "a6dbb4d5-4873-4bd5-a300-6bf8334f14db"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.496054 5048 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6dbb4d5-4873-4bd5-a300-6bf8334f14db-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.496095 5048 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a6dbb4d5-4873-4bd5-a300-6bf8334f14db-httpd-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.496106 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/a6dbb4d5-4873-4bd5-a300-6bf8334f14db-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.496114 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fp4dj\" (UniqueName: \"kubernetes.io/projected/a6dbb4d5-4873-4bd5-a300-6bf8334f14db-kube-api-access-fp4dj\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.496124 5048 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a6dbb4d5-4873-4bd5-a300-6bf8334f14db-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.544757 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b5edcb36-9ff8-4be5-9891-75151b570837","Type":"ContainerDied","Data":"51f41a66093ad244aa9b0e28e7261be5337173ed365d1eacb8c75902947cf6c1"} Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.544816 5048 scope.go:117] "RemoveContainer" containerID="d4642ca362d8f712f07fb21488d8ea0db126279cce9419e173ec1b1e99e6ffb0" Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.544967 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.552678 5048 generic.go:334] "Generic (PLEG): container finished" podID="a6dbb4d5-4873-4bd5-a300-6bf8334f14db" containerID="831a70e5ee0f1376097aa2d55a51702cb5c665cde4634627f05d7c49649253ad" exitCode=0 Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.552732 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7b89f46c6-sjtfz" event={"ID":"a6dbb4d5-4873-4bd5-a300-6bf8334f14db","Type":"ContainerDied","Data":"831a70e5ee0f1376097aa2d55a51702cb5c665cde4634627f05d7c49649253ad"} Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.552773 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7b89f46c6-sjtfz" event={"ID":"a6dbb4d5-4873-4bd5-a300-6bf8334f14db","Type":"ContainerDied","Data":"0e257bdbbd34a08996fbf401438f52d6a87ed5fff22ad4822bed5e321801b1c8"} Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.552879 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7b89f46c6-sjtfz"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.579566 5048 scope.go:117] "RemoveContainer" containerID="152aa3cf0026050ddb4648f5faf2d81e82a9f98df5555d4866abd43f6646003e"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.593556 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-7b89f46c6-sjtfz"]
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.610381 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-7b89f46c6-sjtfz"]
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.617417 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"]
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.618574 5048 scope.go:117] "RemoveContainer" containerID="d958edd44a5c7ad8c726d4d8fcc4015eac30080837930136ff0737d4c8849b05"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.626706 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"]
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.642602 5048 scope.go:117] "RemoveContainer" containerID="831a70e5ee0f1376097aa2d55a51702cb5c665cde4634627f05d7c49649253ad"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.646690 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"]
Dec 13 06:50:28 crc kubenswrapper[5048]: E1213 06:50:28.647130 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="830ced8e-31e0-474d-a13e-93d1825c2c8f" containerName="dnsmasq-dns"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.647156 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="830ced8e-31e0-474d-a13e-93d1825c2c8f" containerName="dnsmasq-dns"
Dec 13 06:50:28 crc kubenswrapper[5048]: E1213 06:50:28.647171 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6dbb4d5-4873-4bd5-a300-6bf8334f14db" containerName="neutron-httpd"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.647182 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6dbb4d5-4873-4bd5-a300-6bf8334f14db" containerName="neutron-httpd"
Dec 13 06:50:28 crc kubenswrapper[5048]: E1213 06:50:28.647198 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6dbb4d5-4873-4bd5-a300-6bf8334f14db" containerName="neutron-api"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.647206 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6dbb4d5-4873-4bd5-a300-6bf8334f14db" containerName="neutron-api"
Dec 13 06:50:28 crc kubenswrapper[5048]: E1213 06:50:28.647235 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5edcb36-9ff8-4be5-9891-75151b570837" containerName="cinder-scheduler"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.647243 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5edcb36-9ff8-4be5-9891-75151b570837" containerName="cinder-scheduler"
Dec 13 06:50:28 crc kubenswrapper[5048]: E1213 06:50:28.647262 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5edcb36-9ff8-4be5-9891-75151b570837" containerName="probe"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.647271 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5edcb36-9ff8-4be5-9891-75151b570837" containerName="probe"
Dec 13 06:50:28 crc kubenswrapper[5048]: E1213 06:50:28.647283 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="830ced8e-31e0-474d-a13e-93d1825c2c8f" containerName="init"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.647290 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="830ced8e-31e0-474d-a13e-93d1825c2c8f" containerName="init"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.647509 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6dbb4d5-4873-4bd5-a300-6bf8334f14db" containerName="neutron-httpd"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.647526 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6dbb4d5-4873-4bd5-a300-6bf8334f14db" containerName="neutron-api"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.647538 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5edcb36-9ff8-4be5-9891-75151b570837" containerName="probe"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.647550 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="830ced8e-31e0-474d-a13e-93d1825c2c8f" containerName="dnsmasq-dns"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.647569 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5edcb36-9ff8-4be5-9891-75151b570837" containerName="cinder-scheduler"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.648581 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.653886 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.661917 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.688808 5048 scope.go:117] "RemoveContainer" containerID="d958edd44a5c7ad8c726d4d8fcc4015eac30080837930136ff0737d4c8849b05"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.699403 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e215aafd-55f0-449e-9886-fa2b93d7fd83-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"e215aafd-55f0-449e-9886-fa2b93d7fd83\") " pod="openstack/cinder-scheduler-0"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.699476 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e215aafd-55f0-449e-9886-fa2b93d7fd83-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"e215aafd-55f0-449e-9886-fa2b93d7fd83\") " pod="openstack/cinder-scheduler-0"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.699546 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e215aafd-55f0-449e-9886-fa2b93d7fd83-config-data\") pod \"cinder-scheduler-0\" (UID: \"e215aafd-55f0-449e-9886-fa2b93d7fd83\") " pod="openstack/cinder-scheduler-0"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.699571 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e215aafd-55f0-449e-9886-fa2b93d7fd83-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"e215aafd-55f0-449e-9886-fa2b93d7fd83\") " pod="openstack/cinder-scheduler-0"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.699608 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e215aafd-55f0-449e-9886-fa2b93d7fd83-scripts\") pod \"cinder-scheduler-0\" (UID: \"e215aafd-55f0-449e-9886-fa2b93d7fd83\") " pod="openstack/cinder-scheduler-0"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.699838 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pnqxk\" (UniqueName: \"kubernetes.io/projected/e215aafd-55f0-449e-9886-fa2b93d7fd83-kube-api-access-pnqxk\") pod \"cinder-scheduler-0\" (UID: \"e215aafd-55f0-449e-9886-fa2b93d7fd83\") " pod="openstack/cinder-scheduler-0"
Dec 13 06:50:28 crc kubenswrapper[5048]: E1213 06:50:28.709031 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d958edd44a5c7ad8c726d4d8fcc4015eac30080837930136ff0737d4c8849b05\": container with ID starting with d958edd44a5c7ad8c726d4d8fcc4015eac30080837930136ff0737d4c8849b05 not found: ID does not exist" containerID="d958edd44a5c7ad8c726d4d8fcc4015eac30080837930136ff0737d4c8849b05"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.709088 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d958edd44a5c7ad8c726d4d8fcc4015eac30080837930136ff0737d4c8849b05"} err="failed to get container status \"d958edd44a5c7ad8c726d4d8fcc4015eac30080837930136ff0737d4c8849b05\": rpc error: code = NotFound desc = could not find container \"d958edd44a5c7ad8c726d4d8fcc4015eac30080837930136ff0737d4c8849b05\": container with ID starting with d958edd44a5c7ad8c726d4d8fcc4015eac30080837930136ff0737d4c8849b05 not found: ID does not exist"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.709119 5048 scope.go:117] "RemoveContainer" containerID="831a70e5ee0f1376097aa2d55a51702cb5c665cde4634627f05d7c49649253ad"
Dec 13 06:50:28 crc kubenswrapper[5048]: E1213 06:50:28.710171 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"831a70e5ee0f1376097aa2d55a51702cb5c665cde4634627f05d7c49649253ad\": container with ID starting with 831a70e5ee0f1376097aa2d55a51702cb5c665cde4634627f05d7c49649253ad not found: ID does not exist" containerID="831a70e5ee0f1376097aa2d55a51702cb5c665cde4634627f05d7c49649253ad"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.710236 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"831a70e5ee0f1376097aa2d55a51702cb5c665cde4634627f05d7c49649253ad"} err="failed to get container status \"831a70e5ee0f1376097aa2d55a51702cb5c665cde4634627f05d7c49649253ad\": rpc error: code = NotFound desc = could not find container \"831a70e5ee0f1376097aa2d55a51702cb5c665cde4634627f05d7c49649253ad\": container with ID starting with 831a70e5ee0f1376097aa2d55a51702cb5c665cde4634627f05d7c49649253ad not found: ID does not exist"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.801104 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e215aafd-55f0-449e-9886-fa2b93d7fd83-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"e215aafd-55f0-449e-9886-fa2b93d7fd83\") " pod="openstack/cinder-scheduler-0"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.801395 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e215aafd-55f0-449e-9886-fa2b93d7fd83-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"e215aafd-55f0-449e-9886-fa2b93d7fd83\") " pod="openstack/cinder-scheduler-0"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.801447 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e215aafd-55f0-449e-9886-fa2b93d7fd83-config-data\") pod \"cinder-scheduler-0\" (UID: \"e215aafd-55f0-449e-9886-fa2b93d7fd83\") " pod="openstack/cinder-scheduler-0"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.801467 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e215aafd-55f0-449e-9886-fa2b93d7fd83-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"e215aafd-55f0-449e-9886-fa2b93d7fd83\") " pod="openstack/cinder-scheduler-0"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.801494 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e215aafd-55f0-449e-9886-fa2b93d7fd83-scripts\") pod \"cinder-scheduler-0\" (UID: \"e215aafd-55f0-449e-9886-fa2b93d7fd83\") " pod="openstack/cinder-scheduler-0"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.801569 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pnqxk\" (UniqueName: \"kubernetes.io/projected/e215aafd-55f0-449e-9886-fa2b93d7fd83-kube-api-access-pnqxk\") pod \"cinder-scheduler-0\" (UID: \"e215aafd-55f0-449e-9886-fa2b93d7fd83\") " pod="openstack/cinder-scheduler-0"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.802574 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e215aafd-55f0-449e-9886-fa2b93d7fd83-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"e215aafd-55f0-449e-9886-fa2b93d7fd83\") " pod="openstack/cinder-scheduler-0"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.806010 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e215aafd-55f0-449e-9886-fa2b93d7fd83-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"e215aafd-55f0-449e-9886-fa2b93d7fd83\") " pod="openstack/cinder-scheduler-0"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.810347 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e215aafd-55f0-449e-9886-fa2b93d7fd83-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"e215aafd-55f0-449e-9886-fa2b93d7fd83\") " pod="openstack/cinder-scheduler-0"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.811095 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e215aafd-55f0-449e-9886-fa2b93d7fd83-config-data\") pod \"cinder-scheduler-0\" (UID: \"e215aafd-55f0-449e-9886-fa2b93d7fd83\") " pod="openstack/cinder-scheduler-0"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.811216 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e215aafd-55f0-449e-9886-fa2b93d7fd83-scripts\") pod \"cinder-scheduler-0\" (UID: \"e215aafd-55f0-449e-9886-fa2b93d7fd83\") " pod="openstack/cinder-scheduler-0"
Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.816765 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pnqxk\" (UniqueName: \"kubernetes.io/projected/e215aafd-55f0-449e-9886-fa2b93d7fd83-kube-api-access-pnqxk\") pod \"cinder-scheduler-0\" (UID: \"e215aafd-55f0-449e-9886-fa2b93d7fd83\") " pod="openstack/cinder-scheduler-0"
\"kubernetes.io/projected/e215aafd-55f0-449e-9886-fa2b93d7fd83-kube-api-access-pnqxk\") pod \"cinder-scheduler-0\" (UID: \"e215aafd-55f0-449e-9886-fa2b93d7fd83\") " pod="openstack/cinder-scheduler-0" Dec 13 06:50:28 crc kubenswrapper[5048]: I1213 06:50:28.989964 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 13 06:50:29 crc kubenswrapper[5048]: I1213 06:50:29.410705 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 13 06:50:29 crc kubenswrapper[5048]: I1213 06:50:29.579006 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"e215aafd-55f0-449e-9886-fa2b93d7fd83","Type":"ContainerStarted","Data":"db52b1923cded602107149d67a6f9ab35bf1d20a6241bbaaad2b354b647ee996"} Dec 13 06:50:29 crc kubenswrapper[5048]: I1213 06:50:29.685541 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-648c8d65dd-tpz2s" podUID="0fb59ccf-cf8a-47f3-ad52-283c568bfb44" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.158:9311/healthcheck\": read tcp 10.217.0.2:49750->10.217.0.158:9311: read: connection reset by peer" Dec 13 06:50:29 crc kubenswrapper[5048]: I1213 06:50:29.685849 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-648c8d65dd-tpz2s" podUID="0fb59ccf-cf8a-47f3-ad52-283c568bfb44" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.158:9311/healthcheck\": read tcp 10.217.0.2:49736->10.217.0.158:9311: read: connection reset by peer" Dec 13 06:50:29 crc kubenswrapper[5048]: I1213 06:50:29.774652 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-648c8d65dd-tpz2s" podUID="0fb59ccf-cf8a-47f3-ad52-283c568bfb44" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.158:9311/healthcheck\": dial tcp 10.217.0.158:9311: connect: connection refused" Dec 13 06:50:29 crc kubenswrapper[5048]: I1213 06:50:29.774666 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-648c8d65dd-tpz2s" podUID="0fb59ccf-cf8a-47f3-ad52-283c568bfb44" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.158:9311/healthcheck\": dial tcp 10.217.0.158:9311: connect: connection refused" Dec 13 06:50:30 crc kubenswrapper[5048]: I1213 06:50:30.584730 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a6dbb4d5-4873-4bd5-a300-6bf8334f14db" path="/var/lib/kubelet/pods/a6dbb4d5-4873-4bd5-a300-6bf8334f14db/volumes" Dec 13 06:50:30 crc kubenswrapper[5048]: I1213 06:50:30.586418 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5edcb36-9ff8-4be5-9891-75151b570837" path="/var/lib/kubelet/pods/b5edcb36-9ff8-4be5-9891-75151b570837/volumes" Dec 13 06:50:30 crc kubenswrapper[5048]: I1213 06:50:30.589538 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"e215aafd-55f0-449e-9886-fa2b93d7fd83","Type":"ContainerStarted","Data":"af18d8fdc5342dd6e23616ad07a1b493b3405a47bda62e902b43570b18bfb52c"} Dec 13 06:50:30 crc kubenswrapper[5048]: I1213 06:50:30.593805 5048 generic.go:334] "Generic (PLEG): container finished" podID="0fb59ccf-cf8a-47f3-ad52-283c568bfb44" containerID="a600dd9724e454bf8a954ff8cf408d3f05833bb1b1e2f23754f8dc6a2632dde5" exitCode=0 Dec 13 06:50:30 crc kubenswrapper[5048]: I1213 06:50:30.593861 5048 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack/barbican-api-648c8d65dd-tpz2s" event={"ID":"0fb59ccf-cf8a-47f3-ad52-283c568bfb44","Type":"ContainerDied","Data":"a600dd9724e454bf8a954ff8cf408d3f05833bb1b1e2f23754f8dc6a2632dde5"} Dec 13 06:50:30 crc kubenswrapper[5048]: I1213 06:50:30.691252 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-648c8d65dd-tpz2s" Dec 13 06:50:30 crc kubenswrapper[5048]: I1213 06:50:30.737895 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0fb59ccf-cf8a-47f3-ad52-283c568bfb44-config-data-custom\") pod \"0fb59ccf-cf8a-47f3-ad52-283c568bfb44\" (UID: \"0fb59ccf-cf8a-47f3-ad52-283c568bfb44\") " Dec 13 06:50:30 crc kubenswrapper[5048]: I1213 06:50:30.737961 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-snlfn\" (UniqueName: \"kubernetes.io/projected/0fb59ccf-cf8a-47f3-ad52-283c568bfb44-kube-api-access-snlfn\") pod \"0fb59ccf-cf8a-47f3-ad52-283c568bfb44\" (UID: \"0fb59ccf-cf8a-47f3-ad52-283c568bfb44\") " Dec 13 06:50:30 crc kubenswrapper[5048]: I1213 06:50:30.737982 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fb59ccf-cf8a-47f3-ad52-283c568bfb44-combined-ca-bundle\") pod \"0fb59ccf-cf8a-47f3-ad52-283c568bfb44\" (UID: \"0fb59ccf-cf8a-47f3-ad52-283c568bfb44\") " Dec 13 06:50:30 crc kubenswrapper[5048]: I1213 06:50:30.737999 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fb59ccf-cf8a-47f3-ad52-283c568bfb44-config-data\") pod \"0fb59ccf-cf8a-47f3-ad52-283c568bfb44\" (UID: \"0fb59ccf-cf8a-47f3-ad52-283c568bfb44\") " Dec 13 06:50:30 crc kubenswrapper[5048]: I1213 06:50:30.738033 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0fb59ccf-cf8a-47f3-ad52-283c568bfb44-logs\") pod \"0fb59ccf-cf8a-47f3-ad52-283c568bfb44\" (UID: \"0fb59ccf-cf8a-47f3-ad52-283c568bfb44\") " Dec 13 06:50:30 crc kubenswrapper[5048]: I1213 06:50:30.738953 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0fb59ccf-cf8a-47f3-ad52-283c568bfb44-logs" (OuterVolumeSpecName: "logs") pod "0fb59ccf-cf8a-47f3-ad52-283c568bfb44" (UID: "0fb59ccf-cf8a-47f3-ad52-283c568bfb44"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:50:30 crc kubenswrapper[5048]: I1213 06:50:30.769788 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0fb59ccf-cf8a-47f3-ad52-283c568bfb44-kube-api-access-snlfn" (OuterVolumeSpecName: "kube-api-access-snlfn") pod "0fb59ccf-cf8a-47f3-ad52-283c568bfb44" (UID: "0fb59ccf-cf8a-47f3-ad52-283c568bfb44"). InnerVolumeSpecName "kube-api-access-snlfn". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:50:30 crc kubenswrapper[5048]: I1213 06:50:30.769874 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0fb59ccf-cf8a-47f3-ad52-283c568bfb44-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "0fb59ccf-cf8a-47f3-ad52-283c568bfb44" (UID: "0fb59ccf-cf8a-47f3-ad52-283c568bfb44"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:50:30 crc kubenswrapper[5048]: I1213 06:50:30.793017 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0fb59ccf-cf8a-47f3-ad52-283c568bfb44-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0fb59ccf-cf8a-47f3-ad52-283c568bfb44" (UID: "0fb59ccf-cf8a-47f3-ad52-283c568bfb44"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:50:30 crc kubenswrapper[5048]: I1213 06:50:30.809099 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0fb59ccf-cf8a-47f3-ad52-283c568bfb44-config-data" (OuterVolumeSpecName: "config-data") pod "0fb59ccf-cf8a-47f3-ad52-283c568bfb44" (UID: "0fb59ccf-cf8a-47f3-ad52-283c568bfb44"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:50:30 crc kubenswrapper[5048]: I1213 06:50:30.839870 5048 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0fb59ccf-cf8a-47f3-ad52-283c568bfb44-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:30 crc kubenswrapper[5048]: I1213 06:50:30.839908 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-snlfn\" (UniqueName: \"kubernetes.io/projected/0fb59ccf-cf8a-47f3-ad52-283c568bfb44-kube-api-access-snlfn\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:30 crc kubenswrapper[5048]: I1213 06:50:30.839922 5048 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fb59ccf-cf8a-47f3-ad52-283c568bfb44-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:30 crc kubenswrapper[5048]: I1213 06:50:30.839930 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fb59ccf-cf8a-47f3-ad52-283c568bfb44-config-data\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:30 crc kubenswrapper[5048]: I1213 06:50:30.839938 5048 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0fb59ccf-cf8a-47f3-ad52-283c568bfb44-logs\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:31 crc kubenswrapper[5048]: I1213 06:50:31.603460 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"e215aafd-55f0-449e-9886-fa2b93d7fd83","Type":"ContainerStarted","Data":"20a29b7a36bc4e1c78182e214285075c1a1be4d302dbaab96a4d9343a3e8b984"} Dec 13 06:50:31 crc kubenswrapper[5048]: I1213 06:50:31.605261 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-648c8d65dd-tpz2s" event={"ID":"0fb59ccf-cf8a-47f3-ad52-283c568bfb44","Type":"ContainerDied","Data":"889cc4a180cd96338ff5a016fa96b80dff496fa3d6fe7fcdbba8edf5ca1de8b8"} Dec 13 06:50:31 crc kubenswrapper[5048]: I1213 06:50:31.605317 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-648c8d65dd-tpz2s" Dec 13 06:50:31 crc kubenswrapper[5048]: I1213 06:50:31.605328 5048 scope.go:117] "RemoveContainer" containerID="a600dd9724e454bf8a954ff8cf408d3f05833bb1b1e2f23754f8dc6a2632dde5" Dec 13 06:50:31 crc kubenswrapper[5048]: I1213 06:50:31.642974 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.642947882 podStartE2EDuration="3.642947882s" podCreationTimestamp="2025-12-13 06:50:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:50:31.634129811 +0000 UTC m=+1265.500724402" watchObservedRunningTime="2025-12-13 06:50:31.642947882 +0000 UTC m=+1265.509542483" Dec 13 06:50:31 crc kubenswrapper[5048]: I1213 06:50:31.643517 5048 scope.go:117] "RemoveContainer" containerID="1899709674bb106a0c0bd4e1d5e157617b213223d0bacf552e443a8af15566fe" Dec 13 06:50:31 crc kubenswrapper[5048]: I1213 06:50:31.681499 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-648c8d65dd-tpz2s"] Dec 13 06:50:31 crc kubenswrapper[5048]: I1213 06:50:31.689929 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-648c8d65dd-tpz2s"] Dec 13 06:50:31 crc kubenswrapper[5048]: I1213 06:50:31.825338 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Dec 13 06:50:32 crc kubenswrapper[5048]: I1213 06:50:32.578729 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0fb59ccf-cf8a-47f3-ad52-283c568bfb44" path="/var/lib/kubelet/pods/0fb59ccf-cf8a-47f3-ad52-283c568bfb44/volumes" Dec 13 06:50:33 crc kubenswrapper[5048]: I1213 06:50:33.990960 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Dec 13 06:50:36 crc kubenswrapper[5048]: I1213 06:50:36.793332 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-5fdc45567b-kg45h" podUID="3c13197f-14c3-45a9-ba9c-bc89b80d6169" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.145:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.145:8443: connect: connection refused" Dec 13 06:50:36 crc kubenswrapper[5048]: I1213 06:50:36.798124 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-5c89574bc9-gw9cl" Dec 13 06:50:39 crc kubenswrapper[5048]: I1213 06:50:39.204718 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Dec 13 06:50:39 crc kubenswrapper[5048]: I1213 06:50:39.957852 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Dec 13 06:50:39 crc kubenswrapper[5048]: E1213 06:50:39.958605 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0fb59ccf-cf8a-47f3-ad52-283c568bfb44" containerName="barbican-api-log" Dec 13 06:50:39 crc kubenswrapper[5048]: I1213 06:50:39.958630 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="0fb59ccf-cf8a-47f3-ad52-283c568bfb44" containerName="barbican-api-log" Dec 13 06:50:39 crc kubenswrapper[5048]: E1213 06:50:39.958667 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0fb59ccf-cf8a-47f3-ad52-283c568bfb44" containerName="barbican-api" Dec 13 06:50:39 crc kubenswrapper[5048]: I1213 06:50:39.958676 5048 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="0fb59ccf-cf8a-47f3-ad52-283c568bfb44" containerName="barbican-api" Dec 13 06:50:39 crc kubenswrapper[5048]: I1213 06:50:39.959403 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="0fb59ccf-cf8a-47f3-ad52-283c568bfb44" containerName="barbican-api-log" Dec 13 06:50:39 crc kubenswrapper[5048]: I1213 06:50:39.959504 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="0fb59ccf-cf8a-47f3-ad52-283c568bfb44" containerName="barbican-api" Dec 13 06:50:39 crc kubenswrapper[5048]: I1213 06:50:39.960542 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Dec 13 06:50:39 crc kubenswrapper[5048]: I1213 06:50:39.969688 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Dec 13 06:50:39 crc kubenswrapper[5048]: I1213 06:50:39.969727 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-krd9w" Dec 13 06:50:39 crc kubenswrapper[5048]: I1213 06:50:39.969836 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Dec 13 06:50:39 crc kubenswrapper[5048]: I1213 06:50:39.991632 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.110648 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2b05e177-f20f-4cc7-a778-7f32d78a3231-openstack-config\") pod \"openstackclient\" (UID: \"2b05e177-f20f-4cc7-a778-7f32d78a3231\") " pod="openstack/openstackclient" Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.111024 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b05e177-f20f-4cc7-a778-7f32d78a3231-combined-ca-bundle\") pod \"openstackclient\" (UID: \"2b05e177-f20f-4cc7-a778-7f32d78a3231\") " pod="openstack/openstackclient" Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.111058 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rp9lr\" (UniqueName: \"kubernetes.io/projected/2b05e177-f20f-4cc7-a778-7f32d78a3231-kube-api-access-rp9lr\") pod \"openstackclient\" (UID: \"2b05e177-f20f-4cc7-a778-7f32d78a3231\") " pod="openstack/openstackclient" Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.111083 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/2b05e177-f20f-4cc7-a778-7f32d78a3231-openstack-config-secret\") pod \"openstackclient\" (UID: \"2b05e177-f20f-4cc7-a778-7f32d78a3231\") " pod="openstack/openstackclient" Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.212721 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2b05e177-f20f-4cc7-a778-7f32d78a3231-openstack-config\") pod \"openstackclient\" (UID: \"2b05e177-f20f-4cc7-a778-7f32d78a3231\") " pod="openstack/openstackclient" Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.212788 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b05e177-f20f-4cc7-a778-7f32d78a3231-combined-ca-bundle\") pod \"openstackclient\" (UID: 
\"2b05e177-f20f-4cc7-a778-7f32d78a3231\") " pod="openstack/openstackclient" Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.212824 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rp9lr\" (UniqueName: \"kubernetes.io/projected/2b05e177-f20f-4cc7-a778-7f32d78a3231-kube-api-access-rp9lr\") pod \"openstackclient\" (UID: \"2b05e177-f20f-4cc7-a778-7f32d78a3231\") " pod="openstack/openstackclient" Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.212856 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/2b05e177-f20f-4cc7-a778-7f32d78a3231-openstack-config-secret\") pod \"openstackclient\" (UID: \"2b05e177-f20f-4cc7-a778-7f32d78a3231\") " pod="openstack/openstackclient" Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.214812 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2b05e177-f20f-4cc7-a778-7f32d78a3231-openstack-config\") pod \"openstackclient\" (UID: \"2b05e177-f20f-4cc7-a778-7f32d78a3231\") " pod="openstack/openstackclient" Dec 13 06:50:40 crc kubenswrapper[5048]: E1213 06:50:40.218393 5048 projected.go:194] Error preparing data for projected volume kube-api-access-rp9lr for pod openstack/openstackclient: failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: User "system:node:crc" cannot create resource "serviceaccounts/token" in API group "" in the namespace "openstack": no relationship found between node 'crc' and this object Dec 13 06:50:40 crc kubenswrapper[5048]: E1213 06:50:40.218474 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/2b05e177-f20f-4cc7-a778-7f32d78a3231-kube-api-access-rp9lr podName:2b05e177-f20f-4cc7-a778-7f32d78a3231 nodeName:}" failed. No retries permitted until 2025-12-13 06:50:40.718454131 +0000 UTC m=+1274.585048712 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-rp9lr" (UniqueName: "kubernetes.io/projected/2b05e177-f20f-4cc7-a778-7f32d78a3231-kube-api-access-rp9lr") pod "openstackclient" (UID: "2b05e177-f20f-4cc7-a778-7f32d78a3231") : failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: User "system:node:crc" cannot create resource "serviceaccounts/token" in API group "" in the namespace "openstack": no relationship found between node 'crc' and this object Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.219138 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b05e177-f20f-4cc7-a778-7f32d78a3231-combined-ca-bundle\") pod \"openstackclient\" (UID: \"2b05e177-f20f-4cc7-a778-7f32d78a3231\") " pod="openstack/openstackclient" Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.237152 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/2b05e177-f20f-4cc7-a778-7f32d78a3231-openstack-config-secret\") pod \"openstackclient\" (UID: \"2b05e177-f20f-4cc7-a778-7f32d78a3231\") " pod="openstack/openstackclient" Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.237649 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Dec 13 06:50:40 crc kubenswrapper[5048]: E1213 06:50:40.238312 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-rp9lr], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/openstackclient" podUID="2b05e177-f20f-4cc7-a778-7f32d78a3231" Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.246833 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.307593 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.308808 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.318613 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.417383 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/c0fd6a3a-5c9b-4d74-bfba-719758182b08-openstack-config-secret\") pod \"openstackclient\" (UID: \"c0fd6a3a-5c9b-4d74-bfba-719758182b08\") " pod="openstack/openstackclient" Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.417496 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0fd6a3a-5c9b-4d74-bfba-719758182b08-combined-ca-bundle\") pod \"openstackclient\" (UID: \"c0fd6a3a-5c9b-4d74-bfba-719758182b08\") " pod="openstack/openstackclient" Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.417649 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/c0fd6a3a-5c9b-4d74-bfba-719758182b08-openstack-config\") pod \"openstackclient\" (UID: \"c0fd6a3a-5c9b-4d74-bfba-719758182b08\") " pod="openstack/openstackclient" Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.417708 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bpd62\" (UniqueName: \"kubernetes.io/projected/c0fd6a3a-5c9b-4d74-bfba-719758182b08-kube-api-access-bpd62\") pod \"openstackclient\" (UID: \"c0fd6a3a-5c9b-4d74-bfba-719758182b08\") " pod="openstack/openstackclient" Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.519423 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/c0fd6a3a-5c9b-4d74-bfba-719758182b08-openstack-config\") pod \"openstackclient\" (UID: \"c0fd6a3a-5c9b-4d74-bfba-719758182b08\") " pod="openstack/openstackclient" Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.519500 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bpd62\" (UniqueName: \"kubernetes.io/projected/c0fd6a3a-5c9b-4d74-bfba-719758182b08-kube-api-access-bpd62\") pod \"openstackclient\" (UID: \"c0fd6a3a-5c9b-4d74-bfba-719758182b08\") " pod="openstack/openstackclient" Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.520721 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/c0fd6a3a-5c9b-4d74-bfba-719758182b08-openstack-config\") pod \"openstackclient\" (UID: \"c0fd6a3a-5c9b-4d74-bfba-719758182b08\") " pod="openstack/openstackclient" Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.522261 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/c0fd6a3a-5c9b-4d74-bfba-719758182b08-openstack-config-secret\") pod \"openstackclient\" (UID: \"c0fd6a3a-5c9b-4d74-bfba-719758182b08\") " pod="openstack/openstackclient" Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.522367 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0fd6a3a-5c9b-4d74-bfba-719758182b08-combined-ca-bundle\") pod \"openstackclient\" (UID: 
\"c0fd6a3a-5c9b-4d74-bfba-719758182b08\") " pod="openstack/openstackclient" Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.527147 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0fd6a3a-5c9b-4d74-bfba-719758182b08-combined-ca-bundle\") pod \"openstackclient\" (UID: \"c0fd6a3a-5c9b-4d74-bfba-719758182b08\") " pod="openstack/openstackclient" Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.530891 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/c0fd6a3a-5c9b-4d74-bfba-719758182b08-openstack-config-secret\") pod \"openstackclient\" (UID: \"c0fd6a3a-5c9b-4d74-bfba-719758182b08\") " pod="openstack/openstackclient" Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.548201 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bpd62\" (UniqueName: \"kubernetes.io/projected/c0fd6a3a-5c9b-4d74-bfba-719758182b08-kube-api-access-bpd62\") pod \"openstackclient\" (UID: \"c0fd6a3a-5c9b-4d74-bfba-719758182b08\") " pod="openstack/openstackclient" Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.669068 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.689526 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.725599 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rp9lr\" (UniqueName: \"kubernetes.io/projected/2b05e177-f20f-4cc7-a778-7f32d78a3231-kube-api-access-rp9lr\") pod \"openstackclient\" (UID: \"2b05e177-f20f-4cc7-a778-7f32d78a3231\") " pod="openstack/openstackclient" Dec 13 06:50:40 crc kubenswrapper[5048]: E1213 06:50:40.732738 5048 projected.go:194] Error preparing data for projected volume kube-api-access-rp9lr for pod openstack/openstackclient: failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: the UID in the bound object reference (2b05e177-f20f-4cc7-a778-7f32d78a3231) does not match the UID in record. The object might have been deleted and then recreated Dec 13 06:50:40 crc kubenswrapper[5048]: E1213 06:50:40.732808 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/2b05e177-f20f-4cc7-a778-7f32d78a3231-kube-api-access-rp9lr podName:2b05e177-f20f-4cc7-a778-7f32d78a3231 nodeName:}" failed. No retries permitted until 2025-12-13 06:50:41.732789334 +0000 UTC m=+1275.599384005 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-rp9lr" (UniqueName: "kubernetes.io/projected/2b05e177-f20f-4cc7-a778-7f32d78a3231-kube-api-access-rp9lr") pod "openstackclient" (UID: "2b05e177-f20f-4cc7-a778-7f32d78a3231") : failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: the UID in the bound object reference (2b05e177-f20f-4cc7-a778-7f32d78a3231) does not match the UID in record. The object might have been deleted and then recreated Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.753558 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.758790 5048 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="2b05e177-f20f-4cc7-a778-7f32d78a3231" podUID="c0fd6a3a-5c9b-4d74-bfba-719758182b08" Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.940478 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2b05e177-f20f-4cc7-a778-7f32d78a3231-openstack-config\") pod \"2b05e177-f20f-4cc7-a778-7f32d78a3231\" (UID: \"2b05e177-f20f-4cc7-a778-7f32d78a3231\") " Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.940870 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b05e177-f20f-4cc7-a778-7f32d78a3231-combined-ca-bundle\") pod \"2b05e177-f20f-4cc7-a778-7f32d78a3231\" (UID: \"2b05e177-f20f-4cc7-a778-7f32d78a3231\") " Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.940965 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/2b05e177-f20f-4cc7-a778-7f32d78a3231-openstack-config-secret\") pod \"2b05e177-f20f-4cc7-a778-7f32d78a3231\" (UID: \"2b05e177-f20f-4cc7-a778-7f32d78a3231\") " Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.941219 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b05e177-f20f-4cc7-a778-7f32d78a3231-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "2b05e177-f20f-4cc7-a778-7f32d78a3231" (UID: "2b05e177-f20f-4cc7-a778-7f32d78a3231"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.941714 5048 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2b05e177-f20f-4cc7-a778-7f32d78a3231-openstack-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.941734 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rp9lr\" (UniqueName: \"kubernetes.io/projected/2b05e177-f20f-4cc7-a778-7f32d78a3231-kube-api-access-rp9lr\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.945346 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b05e177-f20f-4cc7-a778-7f32d78a3231-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "2b05e177-f20f-4cc7-a778-7f32d78a3231" (UID: "2b05e177-f20f-4cc7-a778-7f32d78a3231"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:50:40 crc kubenswrapper[5048]: I1213 06:50:40.945637 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b05e177-f20f-4cc7-a778-7f32d78a3231-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2b05e177-f20f-4cc7-a778-7f32d78a3231" (UID: "2b05e177-f20f-4cc7-a778-7f32d78a3231"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:50:41 crc kubenswrapper[5048]: I1213 06:50:41.043880 5048 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b05e177-f20f-4cc7-a778-7f32d78a3231-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:41 crc kubenswrapper[5048]: I1213 06:50:41.043916 5048 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/2b05e177-f20f-4cc7-a778-7f32d78a3231-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:41 crc kubenswrapper[5048]: I1213 06:50:41.427088 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Dec 13 06:50:41 crc kubenswrapper[5048]: I1213 06:50:41.699454 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Dec 13 06:50:41 crc kubenswrapper[5048]: I1213 06:50:41.704584 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"c0fd6a3a-5c9b-4d74-bfba-719758182b08","Type":"ContainerStarted","Data":"ff4f848832b651eb42ebdfbf559c919e09d1c0d47d185e2a492aa58a9b3eb53d"} Dec 13 06:50:41 crc kubenswrapper[5048]: I1213 06:50:41.718642 5048 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="2b05e177-f20f-4cc7-a778-7f32d78a3231" podUID="c0fd6a3a-5c9b-4d74-bfba-719758182b08" Dec 13 06:50:42 crc kubenswrapper[5048]: I1213 06:50:42.581517 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2b05e177-f20f-4cc7-a778-7f32d78a3231" path="/var/lib/kubelet/pods/2b05e177-f20f-4cc7-a778-7f32d78a3231/volumes" Dec 13 06:50:45 crc kubenswrapper[5048]: I1213 06:50:45.322786 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-6c7b8f495-lq789"] Dec 13 06:50:45 crc kubenswrapper[5048]: I1213 06:50:45.324932 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-6c7b8f495-lq789" Dec 13 06:50:45 crc kubenswrapper[5048]: I1213 06:50:45.330514 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Dec 13 06:50:45 crc kubenswrapper[5048]: I1213 06:50:45.331038 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Dec 13 06:50:45 crc kubenswrapper[5048]: I1213 06:50:45.331299 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Dec 13 06:50:45 crc kubenswrapper[5048]: I1213 06:50:45.335072 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d13568e-517b-46ae-b3bd-dfa6ee7b671a-public-tls-certs\") pod \"swift-proxy-6c7b8f495-lq789\" (UID: \"9d13568e-517b-46ae-b3bd-dfa6ee7b671a\") " pod="openstack/swift-proxy-6c7b8f495-lq789" Dec 13 06:50:45 crc kubenswrapper[5048]: I1213 06:50:45.335190 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d13568e-517b-46ae-b3bd-dfa6ee7b671a-log-httpd\") pod \"swift-proxy-6c7b8f495-lq789\" (UID: \"9d13568e-517b-46ae-b3bd-dfa6ee7b671a\") " pod="openstack/swift-proxy-6c7b8f495-lq789" Dec 13 06:50:45 crc kubenswrapper[5048]: I1213 06:50:45.335225 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/9d13568e-517b-46ae-b3bd-dfa6ee7b671a-etc-swift\") pod \"swift-proxy-6c7b8f495-lq789\" (UID: \"9d13568e-517b-46ae-b3bd-dfa6ee7b671a\") " pod="openstack/swift-proxy-6c7b8f495-lq789" Dec 13 06:50:45 crc kubenswrapper[5048]: I1213 06:50:45.335264 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d13568e-517b-46ae-b3bd-dfa6ee7b671a-combined-ca-bundle\") pod \"swift-proxy-6c7b8f495-lq789\" (UID: \"9d13568e-517b-46ae-b3bd-dfa6ee7b671a\") " pod="openstack/swift-proxy-6c7b8f495-lq789" Dec 13 06:50:45 crc kubenswrapper[5048]: I1213 06:50:45.335299 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d13568e-517b-46ae-b3bd-dfa6ee7b671a-run-httpd\") pod \"swift-proxy-6c7b8f495-lq789\" (UID: \"9d13568e-517b-46ae-b3bd-dfa6ee7b671a\") " pod="openstack/swift-proxy-6c7b8f495-lq789" Dec 13 06:50:45 crc kubenswrapper[5048]: I1213 06:50:45.335320 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nt95g\" (UniqueName: \"kubernetes.io/projected/9d13568e-517b-46ae-b3bd-dfa6ee7b671a-kube-api-access-nt95g\") pod \"swift-proxy-6c7b8f495-lq789\" (UID: \"9d13568e-517b-46ae-b3bd-dfa6ee7b671a\") " pod="openstack/swift-proxy-6c7b8f495-lq789" Dec 13 06:50:45 crc kubenswrapper[5048]: I1213 06:50:45.335347 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d13568e-517b-46ae-b3bd-dfa6ee7b671a-config-data\") pod \"swift-proxy-6c7b8f495-lq789\" (UID: \"9d13568e-517b-46ae-b3bd-dfa6ee7b671a\") " pod="openstack/swift-proxy-6c7b8f495-lq789" Dec 13 06:50:45 crc kubenswrapper[5048]: I1213 06:50:45.335483 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d13568e-517b-46ae-b3bd-dfa6ee7b671a-internal-tls-certs\") pod \"swift-proxy-6c7b8f495-lq789\" (UID: \"9d13568e-517b-46ae-b3bd-dfa6ee7b671a\") " pod="openstack/swift-proxy-6c7b8f495-lq789" Dec 13 06:50:45 crc kubenswrapper[5048]: I1213 06:50:45.337885 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-6c7b8f495-lq789"] Dec 13 06:50:45 crc kubenswrapper[5048]: I1213 06:50:45.438292 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d13568e-517b-46ae-b3bd-dfa6ee7b671a-public-tls-certs\") pod \"swift-proxy-6c7b8f495-lq789\" (UID: \"9d13568e-517b-46ae-b3bd-dfa6ee7b671a\") " pod="openstack/swift-proxy-6c7b8f495-lq789" Dec 13 06:50:45 crc kubenswrapper[5048]: I1213 06:50:45.438396 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/9d13568e-517b-46ae-b3bd-dfa6ee7b671a-etc-swift\") pod \"swift-proxy-6c7b8f495-lq789\" (UID: \"9d13568e-517b-46ae-b3bd-dfa6ee7b671a\") " pod="openstack/swift-proxy-6c7b8f495-lq789" Dec 13 06:50:45 crc kubenswrapper[5048]: I1213 06:50:45.438448 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d13568e-517b-46ae-b3bd-dfa6ee7b671a-log-httpd\") pod \"swift-proxy-6c7b8f495-lq789\" (UID: \"9d13568e-517b-46ae-b3bd-dfa6ee7b671a\") " pod="openstack/swift-proxy-6c7b8f495-lq789" Dec 13 06:50:45 crc kubenswrapper[5048]: I1213 06:50:45.438477 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d13568e-517b-46ae-b3bd-dfa6ee7b671a-combined-ca-bundle\") pod \"swift-proxy-6c7b8f495-lq789\" (UID: \"9d13568e-517b-46ae-b3bd-dfa6ee7b671a\") " pod="openstack/swift-proxy-6c7b8f495-lq789" Dec 13 06:50:45 crc kubenswrapper[5048]: I1213 06:50:45.438517 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d13568e-517b-46ae-b3bd-dfa6ee7b671a-run-httpd\") pod \"swift-proxy-6c7b8f495-lq789\" (UID: \"9d13568e-517b-46ae-b3bd-dfa6ee7b671a\") " pod="openstack/swift-proxy-6c7b8f495-lq789" Dec 13 06:50:45 crc kubenswrapper[5048]: I1213 06:50:45.438532 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nt95g\" (UniqueName: \"kubernetes.io/projected/9d13568e-517b-46ae-b3bd-dfa6ee7b671a-kube-api-access-nt95g\") pod \"swift-proxy-6c7b8f495-lq789\" (UID: \"9d13568e-517b-46ae-b3bd-dfa6ee7b671a\") " pod="openstack/swift-proxy-6c7b8f495-lq789" Dec 13 06:50:45 crc kubenswrapper[5048]: I1213 06:50:45.438551 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d13568e-517b-46ae-b3bd-dfa6ee7b671a-config-data\") pod \"swift-proxy-6c7b8f495-lq789\" (UID: \"9d13568e-517b-46ae-b3bd-dfa6ee7b671a\") " pod="openstack/swift-proxy-6c7b8f495-lq789" Dec 13 06:50:45 crc kubenswrapper[5048]: I1213 06:50:45.438661 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d13568e-517b-46ae-b3bd-dfa6ee7b671a-internal-tls-certs\") pod \"swift-proxy-6c7b8f495-lq789\" (UID: \"9d13568e-517b-46ae-b3bd-dfa6ee7b671a\") " pod="openstack/swift-proxy-6c7b8f495-lq789" Dec 13 06:50:45 crc kubenswrapper[5048]: I1213 
06:50:45.440989 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d13568e-517b-46ae-b3bd-dfa6ee7b671a-run-httpd\") pod \"swift-proxy-6c7b8f495-lq789\" (UID: \"9d13568e-517b-46ae-b3bd-dfa6ee7b671a\") " pod="openstack/swift-proxy-6c7b8f495-lq789" Dec 13 06:50:45 crc kubenswrapper[5048]: I1213 06:50:45.441689 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d13568e-517b-46ae-b3bd-dfa6ee7b671a-log-httpd\") pod \"swift-proxy-6c7b8f495-lq789\" (UID: \"9d13568e-517b-46ae-b3bd-dfa6ee7b671a\") " pod="openstack/swift-proxy-6c7b8f495-lq789" Dec 13 06:50:45 crc kubenswrapper[5048]: I1213 06:50:45.446164 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/9d13568e-517b-46ae-b3bd-dfa6ee7b671a-etc-swift\") pod \"swift-proxy-6c7b8f495-lq789\" (UID: \"9d13568e-517b-46ae-b3bd-dfa6ee7b671a\") " pod="openstack/swift-proxy-6c7b8f495-lq789" Dec 13 06:50:45 crc kubenswrapper[5048]: I1213 06:50:45.446416 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d13568e-517b-46ae-b3bd-dfa6ee7b671a-internal-tls-certs\") pod \"swift-proxy-6c7b8f495-lq789\" (UID: \"9d13568e-517b-46ae-b3bd-dfa6ee7b671a\") " pod="openstack/swift-proxy-6c7b8f495-lq789" Dec 13 06:50:45 crc kubenswrapper[5048]: I1213 06:50:45.447716 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d13568e-517b-46ae-b3bd-dfa6ee7b671a-public-tls-certs\") pod \"swift-proxy-6c7b8f495-lq789\" (UID: \"9d13568e-517b-46ae-b3bd-dfa6ee7b671a\") " pod="openstack/swift-proxy-6c7b8f495-lq789" Dec 13 06:50:45 crc kubenswrapper[5048]: I1213 06:50:45.449067 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d13568e-517b-46ae-b3bd-dfa6ee7b671a-combined-ca-bundle\") pod \"swift-proxy-6c7b8f495-lq789\" (UID: \"9d13568e-517b-46ae-b3bd-dfa6ee7b671a\") " pod="openstack/swift-proxy-6c7b8f495-lq789" Dec 13 06:50:45 crc kubenswrapper[5048]: I1213 06:50:45.449886 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d13568e-517b-46ae-b3bd-dfa6ee7b671a-config-data\") pod \"swift-proxy-6c7b8f495-lq789\" (UID: \"9d13568e-517b-46ae-b3bd-dfa6ee7b671a\") " pod="openstack/swift-proxy-6c7b8f495-lq789" Dec 13 06:50:45 crc kubenswrapper[5048]: I1213 06:50:45.456367 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nt95g\" (UniqueName: \"kubernetes.io/projected/9d13568e-517b-46ae-b3bd-dfa6ee7b671a-kube-api-access-nt95g\") pod \"swift-proxy-6c7b8f495-lq789\" (UID: \"9d13568e-517b-46ae-b3bd-dfa6ee7b671a\") " pod="openstack/swift-proxy-6c7b8f495-lq789" Dec 13 06:50:45 crc kubenswrapper[5048]: I1213 06:50:45.467803 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="2a87c44c-d2c7-4adf-bdbb-fae9a79b955b" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Dec 13 06:50:45 crc kubenswrapper[5048]: I1213 06:50:45.666602 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-6c7b8f495-lq789" Dec 13 06:50:46 crc kubenswrapper[5048]: I1213 06:50:46.752303 5048 generic.go:334] "Generic (PLEG): container finished" podID="2a87c44c-d2c7-4adf-bdbb-fae9a79b955b" containerID="dcaa41ecef403c2cc5003430aa31dcacd971aacf80d5fca4cabc0e4a0c57b682" exitCode=137 Dec 13 06:50:46 crc kubenswrapper[5048]: I1213 06:50:46.752356 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b","Type":"ContainerDied","Data":"dcaa41ecef403c2cc5003430aa31dcacd971aacf80d5fca4cabc0e4a0c57b682"} Dec 13 06:50:46 crc kubenswrapper[5048]: I1213 06:50:46.792878 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-5fdc45567b-kg45h" podUID="3c13197f-14c3-45a9-ba9c-bc89b80d6169" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.145:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.145:8443: connect: connection refused" Dec 13 06:50:46 crc kubenswrapper[5048]: I1213 06:50:46.792996 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-5fdc45567b-kg45h" Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.152298 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.280655 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-sg-core-conf-yaml\") pod \"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b\" (UID: \"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b\") " Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.281310 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-run-httpd\") pod \"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b\" (UID: \"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b\") " Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.281400 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-scripts\") pod \"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b\" (UID: \"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b\") " Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.281419 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-config-data\") pod \"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b\" (UID: \"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b\") " Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.281528 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nfpxd\" (UniqueName: \"kubernetes.io/projected/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-kube-api-access-nfpxd\") pod \"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b\" (UID: \"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b\") " Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.282841 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "2a87c44c-d2c7-4adf-bdbb-fae9a79b955b" (UID: "2a87c44c-d2c7-4adf-bdbb-fae9a79b955b"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.283780 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-combined-ca-bundle\") pod \"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b\" (UID: \"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b\") " Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.284334 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-log-httpd\") pod \"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b\" (UID: \"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b\") " Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.284917 5048 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.285073 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "2a87c44c-d2c7-4adf-bdbb-fae9a79b955b" (UID: "2a87c44c-d2c7-4adf-bdbb-fae9a79b955b"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.289843 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-kube-api-access-nfpxd" (OuterVolumeSpecName: "kube-api-access-nfpxd") pod "2a87c44c-d2c7-4adf-bdbb-fae9a79b955b" (UID: "2a87c44c-d2c7-4adf-bdbb-fae9a79b955b"). InnerVolumeSpecName "kube-api-access-nfpxd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.290612 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-scripts" (OuterVolumeSpecName: "scripts") pod "2a87c44c-d2c7-4adf-bdbb-fae9a79b955b" (UID: "2a87c44c-d2c7-4adf-bdbb-fae9a79b955b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.315970 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "2a87c44c-d2c7-4adf-bdbb-fae9a79b955b" (UID: "2a87c44c-d2c7-4adf-bdbb-fae9a79b955b"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.375138 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2a87c44c-d2c7-4adf-bdbb-fae9a79b955b" (UID: "2a87c44c-d2c7-4adf-bdbb-fae9a79b955b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.391153 5048 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.391420 5048 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.391547 5048 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.391677 5048 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.391757 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nfpxd\" (UniqueName: \"kubernetes.io/projected/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-kube-api-access-nfpxd\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.407142 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-config-data" (OuterVolumeSpecName: "config-data") pod "2a87c44c-d2c7-4adf-bdbb-fae9a79b955b" (UID: "2a87c44c-d2c7-4adf-bdbb-fae9a79b955b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:50:51 crc kubenswrapper[5048]: E1213 06:50:51.442070 5048 fsHandler.go:119] failed to collect filesystem stats - rootDiskErr: could not stat "/var/lib/containers/storage/overlay/b32f17d5117eed0f0f9ed0d4a9122b76c120d35d0e70d59caf07ec24ab128c97/diff" to get inode usage: stat /var/lib/containers/storage/overlay/b32f17d5117eed0f0f9ed0d4a9122b76c120d35d0e70d59caf07ec24ab128c97/diff: no such file or directory, extraDiskErr: could not stat "/var/log/pods/openstack_neutron-7b89f46c6-sjtfz_a6dbb4d5-4873-4bd5-a300-6bf8334f14db/neutron-api/0.log" to get inode usage: stat /var/log/pods/openstack_neutron-7b89f46c6-sjtfz_a6dbb4d5-4873-4bd5-a300-6bf8334f14db/neutron-api/0.log: no such file or directory Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.493047 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b-config-data\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.564799 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-6c7b8f495-lq789"] Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.797300 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6c7b8f495-lq789" event={"ID":"9d13568e-517b-46ae-b3bd-dfa6ee7b671a","Type":"ContainerStarted","Data":"c170862dff4fe7ba243876105ea893d18ecb31aa10369166c9723899e385694a"} Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.803482 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2a87c44c-d2c7-4adf-bdbb-fae9a79b955b","Type":"ContainerDied","Data":"b410c0704097d8afe05b1eaaa993c96a87517871e5f6a94b0746ece946f771a1"} 
Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.803541 5048 scope.go:117] "RemoveContainer" containerID="dcaa41ecef403c2cc5003430aa31dcacd971aacf80d5fca4cabc0e4a0c57b682" Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.803674 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.807655 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"c0fd6a3a-5c9b-4d74-bfba-719758182b08","Type":"ContainerStarted","Data":"b273d8189f80cded944a4420e24f5cceeb02514bbda5b071e23e2c6851457344"} Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.829641 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.192849459 podStartE2EDuration="11.829616924s" podCreationTimestamp="2025-12-13 06:50:40 +0000 UTC" firstStartedPulling="2025-12-13 06:50:41.436582255 +0000 UTC m=+1275.303176836" lastFinishedPulling="2025-12-13 06:50:51.07334972 +0000 UTC m=+1284.939944301" observedRunningTime="2025-12-13 06:50:51.824590407 +0000 UTC m=+1285.691185008" watchObservedRunningTime="2025-12-13 06:50:51.829616924 +0000 UTC m=+1285.696211505" Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.830579 5048 scope.go:117] "RemoveContainer" containerID="a018e2fc6d16a78d973ff5dbf2bbd7e1be2eb4ee7c9c05b0c8b477d00a261cff" Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.867228 5048 scope.go:117] "RemoveContainer" containerID="69d56a5567066958fafe92b1b8664aff26f228b91e5c1972493bb8b42cce91b9" Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.900586 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.904739 5048 scope.go:117] "RemoveContainer" containerID="03d5d3b195f7ca1dd1901cf64b9fb2ba462e881422823475e43e1e4bb15b4f76" Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.911257 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.927068 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 13 06:50:51 crc kubenswrapper[5048]: E1213 06:50:51.927560 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a87c44c-d2c7-4adf-bdbb-fae9a79b955b" containerName="sg-core" Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.927581 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a87c44c-d2c7-4adf-bdbb-fae9a79b955b" containerName="sg-core" Dec 13 06:50:51 crc kubenswrapper[5048]: E1213 06:50:51.927613 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a87c44c-d2c7-4adf-bdbb-fae9a79b955b" containerName="ceilometer-notification-agent" Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.927620 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a87c44c-d2c7-4adf-bdbb-fae9a79b955b" containerName="ceilometer-notification-agent" Dec 13 06:50:51 crc kubenswrapper[5048]: E1213 06:50:51.927640 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a87c44c-d2c7-4adf-bdbb-fae9a79b955b" containerName="ceilometer-central-agent" Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.927647 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a87c44c-d2c7-4adf-bdbb-fae9a79b955b" containerName="ceilometer-central-agent" Dec 13 06:50:51 crc kubenswrapper[5048]: E1213 06:50:51.927657 5048 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="2a87c44c-d2c7-4adf-bdbb-fae9a79b955b" containerName="proxy-httpd" Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.927662 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a87c44c-d2c7-4adf-bdbb-fae9a79b955b" containerName="proxy-httpd" Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.927819 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a87c44c-d2c7-4adf-bdbb-fae9a79b955b" containerName="proxy-httpd" Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.927826 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a87c44c-d2c7-4adf-bdbb-fae9a79b955b" containerName="ceilometer-notification-agent" Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.927850 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a87c44c-d2c7-4adf-bdbb-fae9a79b955b" containerName="sg-core" Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.927859 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a87c44c-d2c7-4adf-bdbb-fae9a79b955b" containerName="ceilometer-central-agent" Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.929525 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.933904 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.933964 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 13 06:50:51 crc kubenswrapper[5048]: I1213 06:50:51.950019 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 13 06:50:52 crc kubenswrapper[5048]: I1213 06:50:52.086177 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 13 06:50:52 crc kubenswrapper[5048]: E1213 06:50:52.086928 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle config-data kube-api-access-r8rl2 log-httpd run-httpd scripts sg-core-conf-yaml], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/ceilometer-0" podUID="d7837559-ab50-467d-b8c8-b12067d45288" Dec 13 06:50:52 crc kubenswrapper[5048]: I1213 06:50:52.109969 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7837559-ab50-467d-b8c8-b12067d45288-config-data\") pod \"ceilometer-0\" (UID: \"d7837559-ab50-467d-b8c8-b12067d45288\") " pod="openstack/ceilometer-0" Dec 13 06:50:52 crc kubenswrapper[5048]: I1213 06:50:52.110058 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d7837559-ab50-467d-b8c8-b12067d45288-scripts\") pod \"ceilometer-0\" (UID: \"d7837559-ab50-467d-b8c8-b12067d45288\") " pod="openstack/ceilometer-0" Dec 13 06:50:52 crc kubenswrapper[5048]: I1213 06:50:52.110093 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d7837559-ab50-467d-b8c8-b12067d45288-run-httpd\") pod \"ceilometer-0\" (UID: \"d7837559-ab50-467d-b8c8-b12067d45288\") " pod="openstack/ceilometer-0" Dec 13 06:50:52 crc kubenswrapper[5048]: I1213 06:50:52.110114 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"kube-api-access-r8rl2\" (UniqueName: \"kubernetes.io/projected/d7837559-ab50-467d-b8c8-b12067d45288-kube-api-access-r8rl2\") pod \"ceilometer-0\" (UID: \"d7837559-ab50-467d-b8c8-b12067d45288\") " pod="openstack/ceilometer-0" Dec 13 06:50:52 crc kubenswrapper[5048]: I1213 06:50:52.110149 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7837559-ab50-467d-b8c8-b12067d45288-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d7837559-ab50-467d-b8c8-b12067d45288\") " pod="openstack/ceilometer-0" Dec 13 06:50:52 crc kubenswrapper[5048]: I1213 06:50:52.110178 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d7837559-ab50-467d-b8c8-b12067d45288-log-httpd\") pod \"ceilometer-0\" (UID: \"d7837559-ab50-467d-b8c8-b12067d45288\") " pod="openstack/ceilometer-0" Dec 13 06:50:52 crc kubenswrapper[5048]: I1213 06:50:52.110205 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d7837559-ab50-467d-b8c8-b12067d45288-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d7837559-ab50-467d-b8c8-b12067d45288\") " pod="openstack/ceilometer-0" Dec 13 06:50:52 crc kubenswrapper[5048]: I1213 06:50:52.212034 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7837559-ab50-467d-b8c8-b12067d45288-config-data\") pod \"ceilometer-0\" (UID: \"d7837559-ab50-467d-b8c8-b12067d45288\") " pod="openstack/ceilometer-0" Dec 13 06:50:52 crc kubenswrapper[5048]: I1213 06:50:52.212165 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d7837559-ab50-467d-b8c8-b12067d45288-scripts\") pod \"ceilometer-0\" (UID: \"d7837559-ab50-467d-b8c8-b12067d45288\") " pod="openstack/ceilometer-0" Dec 13 06:50:52 crc kubenswrapper[5048]: I1213 06:50:52.212204 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d7837559-ab50-467d-b8c8-b12067d45288-run-httpd\") pod \"ceilometer-0\" (UID: \"d7837559-ab50-467d-b8c8-b12067d45288\") " pod="openstack/ceilometer-0" Dec 13 06:50:52 crc kubenswrapper[5048]: I1213 06:50:52.212229 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r8rl2\" (UniqueName: \"kubernetes.io/projected/d7837559-ab50-467d-b8c8-b12067d45288-kube-api-access-r8rl2\") pod \"ceilometer-0\" (UID: \"d7837559-ab50-467d-b8c8-b12067d45288\") " pod="openstack/ceilometer-0" Dec 13 06:50:52 crc kubenswrapper[5048]: I1213 06:50:52.212268 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7837559-ab50-467d-b8c8-b12067d45288-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d7837559-ab50-467d-b8c8-b12067d45288\") " pod="openstack/ceilometer-0" Dec 13 06:50:52 crc kubenswrapper[5048]: I1213 06:50:52.212295 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d7837559-ab50-467d-b8c8-b12067d45288-log-httpd\") pod \"ceilometer-0\" (UID: \"d7837559-ab50-467d-b8c8-b12067d45288\") " pod="openstack/ceilometer-0" Dec 13 06:50:52 crc kubenswrapper[5048]: I1213 06:50:52.212338 5048 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d7837559-ab50-467d-b8c8-b12067d45288-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d7837559-ab50-467d-b8c8-b12067d45288\") " pod="openstack/ceilometer-0" Dec 13 06:50:52 crc kubenswrapper[5048]: I1213 06:50:52.213739 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d7837559-ab50-467d-b8c8-b12067d45288-run-httpd\") pod \"ceilometer-0\" (UID: \"d7837559-ab50-467d-b8c8-b12067d45288\") " pod="openstack/ceilometer-0" Dec 13 06:50:52 crc kubenswrapper[5048]: I1213 06:50:52.214052 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d7837559-ab50-467d-b8c8-b12067d45288-log-httpd\") pod \"ceilometer-0\" (UID: \"d7837559-ab50-467d-b8c8-b12067d45288\") " pod="openstack/ceilometer-0" Dec 13 06:50:52 crc kubenswrapper[5048]: I1213 06:50:52.220629 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d7837559-ab50-467d-b8c8-b12067d45288-scripts\") pod \"ceilometer-0\" (UID: \"d7837559-ab50-467d-b8c8-b12067d45288\") " pod="openstack/ceilometer-0" Dec 13 06:50:52 crc kubenswrapper[5048]: I1213 06:50:52.221041 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7837559-ab50-467d-b8c8-b12067d45288-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d7837559-ab50-467d-b8c8-b12067d45288\") " pod="openstack/ceilometer-0" Dec 13 06:50:52 crc kubenswrapper[5048]: I1213 06:50:52.225393 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d7837559-ab50-467d-b8c8-b12067d45288-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d7837559-ab50-467d-b8c8-b12067d45288\") " pod="openstack/ceilometer-0" Dec 13 06:50:52 crc kubenswrapper[5048]: I1213 06:50:52.227748 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7837559-ab50-467d-b8c8-b12067d45288-config-data\") pod \"ceilometer-0\" (UID: \"d7837559-ab50-467d-b8c8-b12067d45288\") " pod="openstack/ceilometer-0" Dec 13 06:50:52 crc kubenswrapper[5048]: I1213 06:50:52.245537 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r8rl2\" (UniqueName: \"kubernetes.io/projected/d7837559-ab50-467d-b8c8-b12067d45288-kube-api-access-r8rl2\") pod \"ceilometer-0\" (UID: \"d7837559-ab50-467d-b8c8-b12067d45288\") " pod="openstack/ceilometer-0" Dec 13 06:50:52 crc kubenswrapper[5048]: E1213 06:50:52.367475 5048 manager.go:1116] Failed to create existing container: /kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda6dbb4d5_4873_4bd5_a300_6bf8334f14db.slice/crio-0e257bdbbd34a08996fbf401438f52d6a87ed5fff22ad4822bed5e321801b1c8: Error finding container 0e257bdbbd34a08996fbf401438f52d6a87ed5fff22ad4822bed5e321801b1c8: Status 404 returned error can't find the container with id 0e257bdbbd34a08996fbf401438f52d6a87ed5fff22ad4822bed5e321801b1c8 Dec 13 06:50:52 crc kubenswrapper[5048]: I1213 06:50:52.578590 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a87c44c-d2c7-4adf-bdbb-fae9a79b955b" path="/var/lib/kubelet/pods/2a87c44c-d2c7-4adf-bdbb-fae9a79b955b/volumes" Dec 13 06:50:52 crc kubenswrapper[5048]: E1213 06:50:52.616547 5048 
cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3c13197f_14c3_45a9_ba9c_bc89b80d6169.slice/crio-conmon-97618be3b26b3b31a7f8fcb9a3ed26dd8767012b12160a1b25cdadbb069ffb87.scope\": RecentStats: unable to find data in memory cache]" Dec 13 06:50:52 crc kubenswrapper[5048]: I1213 06:50:52.819340 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6c7b8f495-lq789" event={"ID":"9d13568e-517b-46ae-b3bd-dfa6ee7b671a","Type":"ContainerStarted","Data":"c3aa89ec8bd0c1e46ebaa8ba4d505d4523530f6ab6564d36ea96801c47acd28d"} Dec 13 06:50:52 crc kubenswrapper[5048]: I1213 06:50:52.819397 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6c7b8f495-lq789" event={"ID":"9d13568e-517b-46ae-b3bd-dfa6ee7b671a","Type":"ContainerStarted","Data":"2bd908c1c8cfefa3bcdd19c55fc59590d739cfee00c9e071f3a89dc211ac60c7"} Dec 13 06:50:52 crc kubenswrapper[5048]: I1213 06:50:52.819422 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-6c7b8f495-lq789" Dec 13 06:50:52 crc kubenswrapper[5048]: I1213 06:50:52.819493 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-6c7b8f495-lq789" Dec 13 06:50:52 crc kubenswrapper[5048]: I1213 06:50:52.822798 5048 generic.go:334] "Generic (PLEG): container finished" podID="3c13197f-14c3-45a9-ba9c-bc89b80d6169" containerID="97618be3b26b3b31a7f8fcb9a3ed26dd8767012b12160a1b25cdadbb069ffb87" exitCode=137 Dec 13 06:50:52 crc kubenswrapper[5048]: I1213 06:50:52.822896 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 13 06:50:52 crc kubenswrapper[5048]: I1213 06:50:52.823275 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fdc45567b-kg45h" event={"ID":"3c13197f-14c3-45a9-ba9c-bc89b80d6169","Type":"ContainerDied","Data":"97618be3b26b3b31a7f8fcb9a3ed26dd8767012b12160a1b25cdadbb069ffb87"} Dec 13 06:50:52 crc kubenswrapper[5048]: I1213 06:50:52.831750 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 13 06:50:52 crc kubenswrapper[5048]: I1213 06:50:52.847062 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-6c7b8f495-lq789" podStartSLOduration=7.847041375 podStartE2EDuration="7.847041375s" podCreationTimestamp="2025-12-13 06:50:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:50:52.839642872 +0000 UTC m=+1286.706237533" watchObservedRunningTime="2025-12-13 06:50:52.847041375 +0000 UTC m=+1286.713635966" Dec 13 06:50:52 crc kubenswrapper[5048]: I1213 06:50:52.891249 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-ws26t"] Dec 13 06:50:52 crc kubenswrapper[5048]: I1213 06:50:52.892326 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-ws26t" Dec 13 06:50:52 crc kubenswrapper[5048]: I1213 06:50:52.915289 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-ws26t"] Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.001019 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-cn4pn"] Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.002351 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-cn4pn" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.021993 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-cn4pn"] Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.028403 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d7837559-ab50-467d-b8c8-b12067d45288-run-httpd\") pod \"d7837559-ab50-467d-b8c8-b12067d45288\" (UID: \"d7837559-ab50-467d-b8c8-b12067d45288\") " Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.028504 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r8rl2\" (UniqueName: \"kubernetes.io/projected/d7837559-ab50-467d-b8c8-b12067d45288-kube-api-access-r8rl2\") pod \"d7837559-ab50-467d-b8c8-b12067d45288\" (UID: \"d7837559-ab50-467d-b8c8-b12067d45288\") " Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.028548 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d7837559-ab50-467d-b8c8-b12067d45288-sg-core-conf-yaml\") pod \"d7837559-ab50-467d-b8c8-b12067d45288\" (UID: \"d7837559-ab50-467d-b8c8-b12067d45288\") " Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.028593 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d7837559-ab50-467d-b8c8-b12067d45288-scripts\") pod \"d7837559-ab50-467d-b8c8-b12067d45288\" (UID: \"d7837559-ab50-467d-b8c8-b12067d45288\") " Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.028642 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7837559-ab50-467d-b8c8-b12067d45288-combined-ca-bundle\") pod \"d7837559-ab50-467d-b8c8-b12067d45288\" (UID: \"d7837559-ab50-467d-b8c8-b12067d45288\") " Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.028699 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7837559-ab50-467d-b8c8-b12067d45288-config-data\") pod \"d7837559-ab50-467d-b8c8-b12067d45288\" (UID: \"d7837559-ab50-467d-b8c8-b12067d45288\") " Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.028762 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d7837559-ab50-467d-b8c8-b12067d45288-log-httpd\") pod \"d7837559-ab50-467d-b8c8-b12067d45288\" (UID: \"d7837559-ab50-467d-b8c8-b12067d45288\") " Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.029324 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d7837559-ab50-467d-b8c8-b12067d45288-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "d7837559-ab50-467d-b8c8-b12067d45288" (UID: "d7837559-ab50-467d-b8c8-b12067d45288"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.029612 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d7837559-ab50-467d-b8c8-b12067d45288-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "d7837559-ab50-467d-b8c8-b12067d45288" (UID: "d7837559-ab50-467d-b8c8-b12067d45288"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.030862 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d58379fb-a3bb-4853-995d-2df64fa912d4-operator-scripts\") pod \"nova-api-db-create-ws26t\" (UID: \"d58379fb-a3bb-4853-995d-2df64fa912d4\") " pod="openstack/nova-api-db-create-ws26t" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.030922 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cjmzx\" (UniqueName: \"kubernetes.io/projected/d58379fb-a3bb-4853-995d-2df64fa912d4-kube-api-access-cjmzx\") pod \"nova-api-db-create-ws26t\" (UID: \"d58379fb-a3bb-4853-995d-2df64fa912d4\") " pod="openstack/nova-api-db-create-ws26t" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.031201 5048 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d7837559-ab50-467d-b8c8-b12067d45288-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.031220 5048 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d7837559-ab50-467d-b8c8-b12067d45288-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.035223 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d7837559-ab50-467d-b8c8-b12067d45288-kube-api-access-r8rl2" (OuterVolumeSpecName: "kube-api-access-r8rl2") pod "d7837559-ab50-467d-b8c8-b12067d45288" (UID: "d7837559-ab50-467d-b8c8-b12067d45288"). InnerVolumeSpecName "kube-api-access-r8rl2". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.037147 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7837559-ab50-467d-b8c8-b12067d45288-config-data" (OuterVolumeSpecName: "config-data") pod "d7837559-ab50-467d-b8c8-b12067d45288" (UID: "d7837559-ab50-467d-b8c8-b12067d45288"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.038171 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7837559-ab50-467d-b8c8-b12067d45288-scripts" (OuterVolumeSpecName: "scripts") pod "d7837559-ab50-467d-b8c8-b12067d45288" (UID: "d7837559-ab50-467d-b8c8-b12067d45288"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.055623 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7837559-ab50-467d-b8c8-b12067d45288-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "d7837559-ab50-467d-b8c8-b12067d45288" (UID: "d7837559-ab50-467d-b8c8-b12067d45288"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.056658 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7837559-ab50-467d-b8c8-b12067d45288-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d7837559-ab50-467d-b8c8-b12067d45288" (UID: "d7837559-ab50-467d-b8c8-b12067d45288"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.132815 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d58379fb-a3bb-4853-995d-2df64fa912d4-operator-scripts\") pod \"nova-api-db-create-ws26t\" (UID: \"d58379fb-a3bb-4853-995d-2df64fa912d4\") " pod="openstack/nova-api-db-create-ws26t" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.132862 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cjmzx\" (UniqueName: \"kubernetes.io/projected/d58379fb-a3bb-4853-995d-2df64fa912d4-kube-api-access-cjmzx\") pod \"nova-api-db-create-ws26t\" (UID: \"d58379fb-a3bb-4853-995d-2df64fa912d4\") " pod="openstack/nova-api-db-create-ws26t" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.132900 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5nkhr\" (UniqueName: \"kubernetes.io/projected/c250e8c3-7775-44f8-8544-51e69bdaff07-kube-api-access-5nkhr\") pod \"nova-cell0-db-create-cn4pn\" (UID: \"c250e8c3-7775-44f8-8544-51e69bdaff07\") " pod="openstack/nova-cell0-db-create-cn4pn" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.132979 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c250e8c3-7775-44f8-8544-51e69bdaff07-operator-scripts\") pod \"nova-cell0-db-create-cn4pn\" (UID: \"c250e8c3-7775-44f8-8544-51e69bdaff07\") " pod="openstack/nova-cell0-db-create-cn4pn" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.133036 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r8rl2\" (UniqueName: \"kubernetes.io/projected/d7837559-ab50-467d-b8c8-b12067d45288-kube-api-access-r8rl2\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.133048 5048 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d7837559-ab50-467d-b8c8-b12067d45288-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.133057 5048 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d7837559-ab50-467d-b8c8-b12067d45288-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.133066 5048 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7837559-ab50-467d-b8c8-b12067d45288-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.133074 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7837559-ab50-467d-b8c8-b12067d45288-config-data\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.133714 5048 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d58379fb-a3bb-4853-995d-2df64fa912d4-operator-scripts\") pod \"nova-api-db-create-ws26t\" (UID: \"d58379fb-a3bb-4853-995d-2df64fa912d4\") " pod="openstack/nova-api-db-create-ws26t" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.142250 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-7adf-account-create-update-52mfj"] Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.143379 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-7adf-account-create-update-52mfj" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.145611 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.153892 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cjmzx\" (UniqueName: \"kubernetes.io/projected/d58379fb-a3bb-4853-995d-2df64fa912d4-kube-api-access-cjmzx\") pod \"nova-api-db-create-ws26t\" (UID: \"d58379fb-a3bb-4853-995d-2df64fa912d4\") " pod="openstack/nova-api-db-create-ws26t" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.153965 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-jnf7w"] Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.155516 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-jnf7w" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.175395 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-7adf-account-create-update-52mfj"] Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.193265 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-jnf7w"] Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.204677 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-ws26t" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.234719 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5nkhr\" (UniqueName: \"kubernetes.io/projected/c250e8c3-7775-44f8-8544-51e69bdaff07-kube-api-access-5nkhr\") pod \"nova-cell0-db-create-cn4pn\" (UID: \"c250e8c3-7775-44f8-8544-51e69bdaff07\") " pod="openstack/nova-cell0-db-create-cn4pn" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.234828 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c250e8c3-7775-44f8-8544-51e69bdaff07-operator-scripts\") pod \"nova-cell0-db-create-cn4pn\" (UID: \"c250e8c3-7775-44f8-8544-51e69bdaff07\") " pod="openstack/nova-cell0-db-create-cn4pn" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.262176 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c250e8c3-7775-44f8-8544-51e69bdaff07-operator-scripts\") pod \"nova-cell0-db-create-cn4pn\" (UID: \"c250e8c3-7775-44f8-8544-51e69bdaff07\") " pod="openstack/nova-cell0-db-create-cn4pn" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.284188 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5nkhr\" (UniqueName: \"kubernetes.io/projected/c250e8c3-7775-44f8-8544-51e69bdaff07-kube-api-access-5nkhr\") pod \"nova-cell0-db-create-cn4pn\" (UID: \"c250e8c3-7775-44f8-8544-51e69bdaff07\") " pod="openstack/nova-cell0-db-create-cn4pn" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.309497 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-081b-account-create-update-lvp4w"] Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.311096 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-081b-account-create-update-lvp4w" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.313009 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.316219 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-081b-account-create-update-lvp4w"] Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.320221 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-cn4pn" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.327148 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5fdc45567b-kg45h" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.335956 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2cbf8637-a57a-4dd4-a175-6ab0886adbc2-operator-scripts\") pod \"nova-cell1-db-create-jnf7w\" (UID: \"2cbf8637-a57a-4dd4-a175-6ab0886adbc2\") " pod="openstack/nova-cell1-db-create-jnf7w" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.336078 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-95sx5\" (UniqueName: \"kubernetes.io/projected/2cbf8637-a57a-4dd4-a175-6ab0886adbc2-kube-api-access-95sx5\") pod \"nova-cell1-db-create-jnf7w\" (UID: \"2cbf8637-a57a-4dd4-a175-6ab0886adbc2\") " pod="openstack/nova-cell1-db-create-jnf7w" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.336165 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/030ae47b-71e7-4fb3-b518-f807e1fda0a1-operator-scripts\") pod \"nova-api-7adf-account-create-update-52mfj\" (UID: \"030ae47b-71e7-4fb3-b518-f807e1fda0a1\") " pod="openstack/nova-api-7adf-account-create-update-52mfj" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.336198 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ngdq2\" (UniqueName: \"kubernetes.io/projected/030ae47b-71e7-4fb3-b518-f807e1fda0a1-kube-api-access-ngdq2\") pod \"nova-api-7adf-account-create-update-52mfj\" (UID: \"030ae47b-71e7-4fb3-b518-f807e1fda0a1\") " pod="openstack/nova-api-7adf-account-create-update-52mfj" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.437319 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c13197f-14c3-45a9-ba9c-bc89b80d6169-combined-ca-bundle\") pod \"3c13197f-14c3-45a9-ba9c-bc89b80d6169\" (UID: \"3c13197f-14c3-45a9-ba9c-bc89b80d6169\") " Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.437471 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n82nj\" (UniqueName: \"kubernetes.io/projected/3c13197f-14c3-45a9-ba9c-bc89b80d6169-kube-api-access-n82nj\") pod \"3c13197f-14c3-45a9-ba9c-bc89b80d6169\" (UID: \"3c13197f-14c3-45a9-ba9c-bc89b80d6169\") " Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.437564 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3c13197f-14c3-45a9-ba9c-bc89b80d6169-config-data\") pod \"3c13197f-14c3-45a9-ba9c-bc89b80d6169\" (UID: \"3c13197f-14c3-45a9-ba9c-bc89b80d6169\") " Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.437616 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c13197f-14c3-45a9-ba9c-bc89b80d6169-horizon-tls-certs\") pod \"3c13197f-14c3-45a9-ba9c-bc89b80d6169\" (UID: \"3c13197f-14c3-45a9-ba9c-bc89b80d6169\") " Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.437709 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3c13197f-14c3-45a9-ba9c-bc89b80d6169-horizon-secret-key\") pod \"3c13197f-14c3-45a9-ba9c-bc89b80d6169\" (UID: 
\"3c13197f-14c3-45a9-ba9c-bc89b80d6169\") " Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.437760 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3c13197f-14c3-45a9-ba9c-bc89b80d6169-scripts\") pod \"3c13197f-14c3-45a9-ba9c-bc89b80d6169\" (UID: \"3c13197f-14c3-45a9-ba9c-bc89b80d6169\") " Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.437787 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3c13197f-14c3-45a9-ba9c-bc89b80d6169-logs\") pod \"3c13197f-14c3-45a9-ba9c-bc89b80d6169\" (UID: \"3c13197f-14c3-45a9-ba9c-bc89b80d6169\") " Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.438053 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ebfb097-2554-4605-bf3c-a545907fbaa6-operator-scripts\") pod \"nova-cell0-081b-account-create-update-lvp4w\" (UID: \"8ebfb097-2554-4605-bf3c-a545907fbaa6\") " pod="openstack/nova-cell0-081b-account-create-update-lvp4w" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.438160 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-95sx5\" (UniqueName: \"kubernetes.io/projected/2cbf8637-a57a-4dd4-a175-6ab0886adbc2-kube-api-access-95sx5\") pod \"nova-cell1-db-create-jnf7w\" (UID: \"2cbf8637-a57a-4dd4-a175-6ab0886adbc2\") " pod="openstack/nova-cell1-db-create-jnf7w" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.438239 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/030ae47b-71e7-4fb3-b518-f807e1fda0a1-operator-scripts\") pod \"nova-api-7adf-account-create-update-52mfj\" (UID: \"030ae47b-71e7-4fb3-b518-f807e1fda0a1\") " pod="openstack/nova-api-7adf-account-create-update-52mfj" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.438276 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ngdq2\" (UniqueName: \"kubernetes.io/projected/030ae47b-71e7-4fb3-b518-f807e1fda0a1-kube-api-access-ngdq2\") pod \"nova-api-7adf-account-create-update-52mfj\" (UID: \"030ae47b-71e7-4fb3-b518-f807e1fda0a1\") " pod="openstack/nova-api-7adf-account-create-update-52mfj" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.438330 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hx47g\" (UniqueName: \"kubernetes.io/projected/8ebfb097-2554-4605-bf3c-a545907fbaa6-kube-api-access-hx47g\") pod \"nova-cell0-081b-account-create-update-lvp4w\" (UID: \"8ebfb097-2554-4605-bf3c-a545907fbaa6\") " pod="openstack/nova-cell0-081b-account-create-update-lvp4w" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.438396 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2cbf8637-a57a-4dd4-a175-6ab0886adbc2-operator-scripts\") pod \"nova-cell1-db-create-jnf7w\" (UID: \"2cbf8637-a57a-4dd4-a175-6ab0886adbc2\") " pod="openstack/nova-cell1-db-create-jnf7w" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.439664 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2cbf8637-a57a-4dd4-a175-6ab0886adbc2-operator-scripts\") pod \"nova-cell1-db-create-jnf7w\" (UID: 
\"2cbf8637-a57a-4dd4-a175-6ab0886adbc2\") " pod="openstack/nova-cell1-db-create-jnf7w" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.439870 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3c13197f-14c3-45a9-ba9c-bc89b80d6169-logs" (OuterVolumeSpecName: "logs") pod "3c13197f-14c3-45a9-ba9c-bc89b80d6169" (UID: "3c13197f-14c3-45a9-ba9c-bc89b80d6169"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.440179 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/030ae47b-71e7-4fb3-b518-f807e1fda0a1-operator-scripts\") pod \"nova-api-7adf-account-create-update-52mfj\" (UID: \"030ae47b-71e7-4fb3-b518-f807e1fda0a1\") " pod="openstack/nova-api-7adf-account-create-update-52mfj" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.445063 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c13197f-14c3-45a9-ba9c-bc89b80d6169-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "3c13197f-14c3-45a9-ba9c-bc89b80d6169" (UID: "3c13197f-14c3-45a9-ba9c-bc89b80d6169"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.452910 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c13197f-14c3-45a9-ba9c-bc89b80d6169-kube-api-access-n82nj" (OuterVolumeSpecName: "kube-api-access-n82nj") pod "3c13197f-14c3-45a9-ba9c-bc89b80d6169" (UID: "3c13197f-14c3-45a9-ba9c-bc89b80d6169"). InnerVolumeSpecName "kube-api-access-n82nj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.477894 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c13197f-14c3-45a9-ba9c-bc89b80d6169-config-data" (OuterVolumeSpecName: "config-data") pod "3c13197f-14c3-45a9-ba9c-bc89b80d6169" (UID: "3c13197f-14c3-45a9-ba9c-bc89b80d6169"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.478482 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ngdq2\" (UniqueName: \"kubernetes.io/projected/030ae47b-71e7-4fb3-b518-f807e1fda0a1-kube-api-access-ngdq2\") pod \"nova-api-7adf-account-create-update-52mfj\" (UID: \"030ae47b-71e7-4fb3-b518-f807e1fda0a1\") " pod="openstack/nova-api-7adf-account-create-update-52mfj" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.483000 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-95sx5\" (UniqueName: \"kubernetes.io/projected/2cbf8637-a57a-4dd4-a175-6ab0886adbc2-kube-api-access-95sx5\") pod \"nova-cell1-db-create-jnf7w\" (UID: \"2cbf8637-a57a-4dd4-a175-6ab0886adbc2\") " pod="openstack/nova-cell1-db-create-jnf7w" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.505241 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c13197f-14c3-45a9-ba9c-bc89b80d6169-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3c13197f-14c3-45a9-ba9c-bc89b80d6169" (UID: "3c13197f-14c3-45a9-ba9c-bc89b80d6169"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.518562 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-f16a-account-create-update-kwscr"] Dec 13 06:50:53 crc kubenswrapper[5048]: E1213 06:50:53.519006 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c13197f-14c3-45a9-ba9c-bc89b80d6169" containerName="horizon" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.519023 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c13197f-14c3-45a9-ba9c-bc89b80d6169" containerName="horizon" Dec 13 06:50:53 crc kubenswrapper[5048]: E1213 06:50:53.519058 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c13197f-14c3-45a9-ba9c-bc89b80d6169" containerName="horizon-log" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.519064 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c13197f-14c3-45a9-ba9c-bc89b80d6169" containerName="horizon-log" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.519209 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c13197f-14c3-45a9-ba9c-bc89b80d6169" containerName="horizon" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.519222 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c13197f-14c3-45a9-ba9c-bc89b80d6169" containerName="horizon-log" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.519963 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-f16a-account-create-update-kwscr" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.527466 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.531387 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c13197f-14c3-45a9-ba9c-bc89b80d6169-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "3c13197f-14c3-45a9-ba9c-bc89b80d6169" (UID: "3c13197f-14c3-45a9-ba9c-bc89b80d6169"). InnerVolumeSpecName "horizon-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.541211 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hx47g\" (UniqueName: \"kubernetes.io/projected/8ebfb097-2554-4605-bf3c-a545907fbaa6-kube-api-access-hx47g\") pod \"nova-cell0-081b-account-create-update-lvp4w\" (UID: \"8ebfb097-2554-4605-bf3c-a545907fbaa6\") " pod="openstack/nova-cell0-081b-account-create-update-lvp4w" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.541299 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ebfb097-2554-4605-bf3c-a545907fbaa6-operator-scripts\") pod \"nova-cell0-081b-account-create-update-lvp4w\" (UID: \"8ebfb097-2554-4605-bf3c-a545907fbaa6\") " pod="openstack/nova-cell0-081b-account-create-update-lvp4w" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.541420 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3c13197f-14c3-45a9-ba9c-bc89b80d6169-config-data\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.541460 5048 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c13197f-14c3-45a9-ba9c-bc89b80d6169-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.541474 5048 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3c13197f-14c3-45a9-ba9c-bc89b80d6169-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.541486 5048 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3c13197f-14c3-45a9-ba9c-bc89b80d6169-logs\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.541495 5048 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c13197f-14c3-45a9-ba9c-bc89b80d6169-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.541505 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n82nj\" (UniqueName: \"kubernetes.io/projected/3c13197f-14c3-45a9-ba9c-bc89b80d6169-kube-api-access-n82nj\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.542323 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ebfb097-2554-4605-bf3c-a545907fbaa6-operator-scripts\") pod \"nova-cell0-081b-account-create-update-lvp4w\" (UID: \"8ebfb097-2554-4605-bf3c-a545907fbaa6\") " pod="openstack/nova-cell0-081b-account-create-update-lvp4w" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.544974 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-f16a-account-create-update-kwscr"] Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.560345 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c13197f-14c3-45a9-ba9c-bc89b80d6169-scripts" (OuterVolumeSpecName: "scripts") pod "3c13197f-14c3-45a9-ba9c-bc89b80d6169" (UID: "3c13197f-14c3-45a9-ba9c-bc89b80d6169"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.561701 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hx47g\" (UniqueName: \"kubernetes.io/projected/8ebfb097-2554-4605-bf3c-a545907fbaa6-kube-api-access-hx47g\") pod \"nova-cell0-081b-account-create-update-lvp4w\" (UID: \"8ebfb097-2554-4605-bf3c-a545907fbaa6\") " pod="openstack/nova-cell0-081b-account-create-update-lvp4w" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.599831 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-7adf-account-create-update-52mfj" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.617923 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-jnf7w" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.643022 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6vm67\" (UniqueName: \"kubernetes.io/projected/55848283-b878-4f02-b76f-aaf4ce0d6765-kube-api-access-6vm67\") pod \"nova-cell1-f16a-account-create-update-kwscr\" (UID: \"55848283-b878-4f02-b76f-aaf4ce0d6765\") " pod="openstack/nova-cell1-f16a-account-create-update-kwscr" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.643086 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55848283-b878-4f02-b76f-aaf4ce0d6765-operator-scripts\") pod \"nova-cell1-f16a-account-create-update-kwscr\" (UID: \"55848283-b878-4f02-b76f-aaf4ce0d6765\") " pod="openstack/nova-cell1-f16a-account-create-update-kwscr" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.643145 5048 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3c13197f-14c3-45a9-ba9c-bc89b80d6169-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.662324 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-081b-account-create-update-lvp4w" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.744987 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6vm67\" (UniqueName: \"kubernetes.io/projected/55848283-b878-4f02-b76f-aaf4ce0d6765-kube-api-access-6vm67\") pod \"nova-cell1-f16a-account-create-update-kwscr\" (UID: \"55848283-b878-4f02-b76f-aaf4ce0d6765\") " pod="openstack/nova-cell1-f16a-account-create-update-kwscr" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.745051 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55848283-b878-4f02-b76f-aaf4ce0d6765-operator-scripts\") pod \"nova-cell1-f16a-account-create-update-kwscr\" (UID: \"55848283-b878-4f02-b76f-aaf4ce0d6765\") " pod="openstack/nova-cell1-f16a-account-create-update-kwscr" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.746270 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55848283-b878-4f02-b76f-aaf4ce0d6765-operator-scripts\") pod \"nova-cell1-f16a-account-create-update-kwscr\" (UID: \"55848283-b878-4f02-b76f-aaf4ce0d6765\") " pod="openstack/nova-cell1-f16a-account-create-update-kwscr" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.770514 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6vm67\" (UniqueName: \"kubernetes.io/projected/55848283-b878-4f02-b76f-aaf4ce0d6765-kube-api-access-6vm67\") pod \"nova-cell1-f16a-account-create-update-kwscr\" (UID: \"55848283-b878-4f02-b76f-aaf4ce0d6765\") " pod="openstack/nova-cell1-f16a-account-create-update-kwscr" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.802149 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-ws26t"] Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.846822 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-f16a-account-create-update-kwscr" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.874807 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.877370 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5fdc45567b-kg45h" Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.877558 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fdc45567b-kg45h" event={"ID":"3c13197f-14c3-45a9-ba9c-bc89b80d6169","Type":"ContainerDied","Data":"b0986a3e3cb62bf90ec7ac3c701c784c83e379ea416081bd25cafd19d23664d8"} Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.877635 5048 scope.go:117] "RemoveContainer" containerID="a4c525fef07828f178bd4736adb2426b714431b369912c1d75d9b97962f6a924" Dec 13 06:50:53 crc kubenswrapper[5048]: W1213 06:50:53.879769 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd58379fb_a3bb_4853_995d_2df64fa912d4.slice/crio-23fb2eccde4d97cc65cd487e0a3bdc89456610b0b61f47fb5ba22e3c8db39c06 WatchSource:0}: Error finding container 23fb2eccde4d97cc65cd487e0a3bdc89456610b0b61f47fb5ba22e3c8db39c06: Status 404 returned error can't find the container with id 23fb2eccde4d97cc65cd487e0a3bdc89456610b0b61f47fb5ba22e3c8db39c06 Dec 13 06:50:53 crc kubenswrapper[5048]: I1213 06:50:53.960318 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-cn4pn"] Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.224617 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.248171 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.263255 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.265796 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.269975 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.270195 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.273082 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5fdc45567b-kg45h"] Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.282111 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-5fdc45567b-kg45h"] Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.294929 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.358863 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d3148a97-28d6-4381-86c5-a8bccaaa8fc8\") " pod="openstack/ceilometer-0" Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.358939 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-scripts\") pod \"ceilometer-0\" (UID: \"d3148a97-28d6-4381-86c5-a8bccaaa8fc8\") " pod="openstack/ceilometer-0" Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.358976 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-run-httpd\") pod \"ceilometer-0\" (UID: \"d3148a97-28d6-4381-86c5-a8bccaaa8fc8\") " pod="openstack/ceilometer-0" Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.359236 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-log-httpd\") pod \"ceilometer-0\" (UID: \"d3148a97-28d6-4381-86c5-a8bccaaa8fc8\") " pod="openstack/ceilometer-0" Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.359287 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d3148a97-28d6-4381-86c5-a8bccaaa8fc8\") " pod="openstack/ceilometer-0" Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.359428 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xfknn\" (UniqueName: \"kubernetes.io/projected/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-kube-api-access-xfknn\") pod \"ceilometer-0\" (UID: \"d3148a97-28d6-4381-86c5-a8bccaaa8fc8\") " pod="openstack/ceilometer-0" Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.359478 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-config-data\") pod \"ceilometer-0\" (UID: \"d3148a97-28d6-4381-86c5-a8bccaaa8fc8\") " pod="openstack/ceilometer-0" Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.383610 5048 scope.go:117] 
"RemoveContainer" containerID="97618be3b26b3b31a7f8fcb9a3ed26dd8767012b12160a1b25cdadbb069ffb87" Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.440697 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-7adf-account-create-update-52mfj"] Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.460621 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-scripts\") pod \"ceilometer-0\" (UID: \"d3148a97-28d6-4381-86c5-a8bccaaa8fc8\") " pod="openstack/ceilometer-0" Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.460663 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-run-httpd\") pod \"ceilometer-0\" (UID: \"d3148a97-28d6-4381-86c5-a8bccaaa8fc8\") " pod="openstack/ceilometer-0" Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.460747 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-log-httpd\") pod \"ceilometer-0\" (UID: \"d3148a97-28d6-4381-86c5-a8bccaaa8fc8\") " pod="openstack/ceilometer-0" Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.460769 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d3148a97-28d6-4381-86c5-a8bccaaa8fc8\") " pod="openstack/ceilometer-0" Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.460822 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xfknn\" (UniqueName: \"kubernetes.io/projected/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-kube-api-access-xfknn\") pod \"ceilometer-0\" (UID: \"d3148a97-28d6-4381-86c5-a8bccaaa8fc8\") " pod="openstack/ceilometer-0" Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.460843 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-config-data\") pod \"ceilometer-0\" (UID: \"d3148a97-28d6-4381-86c5-a8bccaaa8fc8\") " pod="openstack/ceilometer-0" Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.460880 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d3148a97-28d6-4381-86c5-a8bccaaa8fc8\") " pod="openstack/ceilometer-0" Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.462063 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-run-httpd\") pod \"ceilometer-0\" (UID: \"d3148a97-28d6-4381-86c5-a8bccaaa8fc8\") " pod="openstack/ceilometer-0" Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.462253 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-log-httpd\") pod \"ceilometer-0\" (UID: \"d3148a97-28d6-4381-86c5-a8bccaaa8fc8\") " pod="openstack/ceilometer-0" Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.463192 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/nova-cell1-db-create-jnf7w"] Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.469335 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-scripts\") pod \"ceilometer-0\" (UID: \"d3148a97-28d6-4381-86c5-a8bccaaa8fc8\") " pod="openstack/ceilometer-0" Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.469368 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d3148a97-28d6-4381-86c5-a8bccaaa8fc8\") " pod="openstack/ceilometer-0" Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.469972 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-config-data\") pod \"ceilometer-0\" (UID: \"d3148a97-28d6-4381-86c5-a8bccaaa8fc8\") " pod="openstack/ceilometer-0" Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.470037 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d3148a97-28d6-4381-86c5-a8bccaaa8fc8\") " pod="openstack/ceilometer-0" Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.483752 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xfknn\" (UniqueName: \"kubernetes.io/projected/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-kube-api-access-xfknn\") pod \"ceilometer-0\" (UID: \"d3148a97-28d6-4381-86c5-a8bccaaa8fc8\") " pod="openstack/ceilometer-0" Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.525908 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.526830 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.584172 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c13197f-14c3-45a9-ba9c-bc89b80d6169" path="/var/lib/kubelet/pods/3c13197f-14c3-45a9-ba9c-bc89b80d6169/volumes" Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.584872 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d7837559-ab50-467d-b8c8-b12067d45288" path="/var/lib/kubelet/pods/d7837559-ab50-467d-b8c8-b12067d45288/volumes" Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.601343 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-f16a-account-create-update-kwscr"] Dec 13 06:50:54 crc kubenswrapper[5048]: W1213 06:50:54.611934 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod55848283_b878_4f02_b76f_aaf4ce0d6765.slice/crio-68ac5be8cfdbc0a27775026e83afa84467a1570b9541fa373cba6db4c29b39c1 WatchSource:0}: Error finding container 68ac5be8cfdbc0a27775026e83afa84467a1570b9541fa373cba6db4c29b39c1: Status 404 returned error can't find the container with id 68ac5be8cfdbc0a27775026e83afa84467a1570b9541fa373cba6db4c29b39c1 Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.613914 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-081b-account-create-update-lvp4w"] Dec 13 06:50:54 crc kubenswrapper[5048]: W1213 06:50:54.617261 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8ebfb097_2554_4605_bf3c_a545907fbaa6.slice/crio-99e960204950aab39b698e30feed01b228d877d1629e1b5a77c020b022abf650 WatchSource:0}: Error finding container 99e960204950aab39b698e30feed01b228d877d1629e1b5a77c020b022abf650: Status 404 returned error can't find the container with id 99e960204950aab39b698e30feed01b228d877d1629e1b5a77c020b022abf650 Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.887266 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-jnf7w" event={"ID":"2cbf8637-a57a-4dd4-a175-6ab0886adbc2","Type":"ContainerStarted","Data":"5ba3c6618250ebca7d8202c548d345e45d04eaa0e75028a32e3ac7fae05b06f1"} Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.889398 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-ws26t" event={"ID":"d58379fb-a3bb-4853-995d-2df64fa912d4","Type":"ContainerStarted","Data":"82b7d04cc4842f3ec347f5de3aec97895d8b06b0b5c32ac67d2ef77c93994b9f"} Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.889497 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-ws26t" event={"ID":"d58379fb-a3bb-4853-995d-2df64fa912d4","Type":"ContainerStarted","Data":"23fb2eccde4d97cc65cd487e0a3bdc89456610b0b61f47fb5ba22e3c8db39c06"} Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.895699 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-cn4pn" event={"ID":"c250e8c3-7775-44f8-8544-51e69bdaff07","Type":"ContainerStarted","Data":"509b9f55120eec3f4d51daaf8e43854a9b210d1f4e1d4f676355369fdb0f5949"} Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.895757 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-cn4pn" event={"ID":"c250e8c3-7775-44f8-8544-51e69bdaff07","Type":"ContainerStarted","Data":"a7710b0f5e14417253f1f5d7aab39cf7d9832a53ce7713dea566acf3cf919804"} 
Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.903741 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-081b-account-create-update-lvp4w" event={"ID":"8ebfb097-2554-4605-bf3c-a545907fbaa6","Type":"ContainerStarted","Data":"99e960204950aab39b698e30feed01b228d877d1629e1b5a77c020b022abf650"} Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.913818 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-db-create-ws26t" podStartSLOduration=2.913799557 podStartE2EDuration="2.913799557s" podCreationTimestamp="2025-12-13 06:50:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:50:54.911109413 +0000 UTC m=+1288.777704024" watchObservedRunningTime="2025-12-13 06:50:54.913799557 +0000 UTC m=+1288.780394138" Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.914390 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-7adf-account-create-update-52mfj" event={"ID":"030ae47b-71e7-4fb3-b518-f807e1fda0a1","Type":"ContainerStarted","Data":"869133386f138ce3e83d7c0462807aa4e2748337faf591013e0f304dcaaa255c"} Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.914461 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-7adf-account-create-update-52mfj" event={"ID":"030ae47b-71e7-4fb3-b518-f807e1fda0a1","Type":"ContainerStarted","Data":"d6bd46ff906fce7e69b1896f50188b35df1432279ed7969a9b333bd27fc36bf3"} Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.917006 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-f16a-account-create-update-kwscr" event={"ID":"55848283-b878-4f02-b76f-aaf4ce0d6765","Type":"ContainerStarted","Data":"68ac5be8cfdbc0a27775026e83afa84467a1570b9541fa373cba6db4c29b39c1"} Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.940051 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-db-create-cn4pn" podStartSLOduration=2.940032263 podStartE2EDuration="2.940032263s" podCreationTimestamp="2025-12-13 06:50:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:50:54.928251961 +0000 UTC m=+1288.794846562" watchObservedRunningTime="2025-12-13 06:50:54.940032263 +0000 UTC m=+1288.806626844" Dec 13 06:50:54 crc kubenswrapper[5048]: I1213 06:50:54.947758 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-7adf-account-create-update-52mfj" podStartSLOduration=1.947740274 podStartE2EDuration="1.947740274s" podCreationTimestamp="2025-12-13 06:50:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:50:54.946097129 +0000 UTC m=+1288.812691720" watchObservedRunningTime="2025-12-13 06:50:54.947740274 +0000 UTC m=+1288.814334855" Dec 13 06:50:55 crc kubenswrapper[5048]: I1213 06:50:55.124161 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 13 06:50:55 crc kubenswrapper[5048]: W1213 06:50:55.129231 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd3148a97_28d6_4381_86c5_a8bccaaa8fc8.slice/crio-9a1110ea29559ba0a506f2f9ef843f9e4ad097795235e4f18d665d4a3bbe369e WatchSource:0}: Error finding container 
9a1110ea29559ba0a506f2f9ef843f9e4ad097795235e4f18d665d4a3bbe369e: Status 404 returned error can't find the container with id 9a1110ea29559ba0a506f2f9ef843f9e4ad097795235e4f18d665d4a3bbe369e Dec 13 06:50:55 crc kubenswrapper[5048]: I1213 06:50:55.931758 5048 generic.go:334] "Generic (PLEG): container finished" podID="8ebfb097-2554-4605-bf3c-a545907fbaa6" containerID="364a211cd614b412277535f25bd55c09554bfe33e018dd0e2294b3f08d7fb1a5" exitCode=0 Dec 13 06:50:55 crc kubenswrapper[5048]: I1213 06:50:55.932062 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-081b-account-create-update-lvp4w" event={"ID":"8ebfb097-2554-4605-bf3c-a545907fbaa6","Type":"ContainerDied","Data":"364a211cd614b412277535f25bd55c09554bfe33e018dd0e2294b3f08d7fb1a5"} Dec 13 06:50:55 crc kubenswrapper[5048]: I1213 06:50:55.959364 5048 generic.go:334] "Generic (PLEG): container finished" podID="030ae47b-71e7-4fb3-b518-f807e1fda0a1" containerID="869133386f138ce3e83d7c0462807aa4e2748337faf591013e0f304dcaaa255c" exitCode=0 Dec 13 06:50:55 crc kubenswrapper[5048]: I1213 06:50:55.959830 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-7adf-account-create-update-52mfj" event={"ID":"030ae47b-71e7-4fb3-b518-f807e1fda0a1","Type":"ContainerDied","Data":"869133386f138ce3e83d7c0462807aa4e2748337faf591013e0f304dcaaa255c"} Dec 13 06:50:55 crc kubenswrapper[5048]: I1213 06:50:55.966984 5048 generic.go:334] "Generic (PLEG): container finished" podID="55848283-b878-4f02-b76f-aaf4ce0d6765" containerID="e765a8ba38d5b50a76fd8a443424d65d63bdd3325c768dfa3ddc6b59ee7aa616" exitCode=0 Dec 13 06:50:55 crc kubenswrapper[5048]: I1213 06:50:55.967411 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-f16a-account-create-update-kwscr" event={"ID":"55848283-b878-4f02-b76f-aaf4ce0d6765","Type":"ContainerDied","Data":"e765a8ba38d5b50a76fd8a443424d65d63bdd3325c768dfa3ddc6b59ee7aa616"} Dec 13 06:50:55 crc kubenswrapper[5048]: I1213 06:50:55.968531 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d3148a97-28d6-4381-86c5-a8bccaaa8fc8","Type":"ContainerStarted","Data":"9a1110ea29559ba0a506f2f9ef843f9e4ad097795235e4f18d665d4a3bbe369e"} Dec 13 06:50:55 crc kubenswrapper[5048]: I1213 06:50:55.970832 5048 generic.go:334] "Generic (PLEG): container finished" podID="2cbf8637-a57a-4dd4-a175-6ab0886adbc2" containerID="fa48d722834768085e81c545be3ea57e6100150ff87da89799a5d0bdae19c0d6" exitCode=0 Dec 13 06:50:55 crc kubenswrapper[5048]: I1213 06:50:55.971036 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-jnf7w" event={"ID":"2cbf8637-a57a-4dd4-a175-6ab0886adbc2","Type":"ContainerDied","Data":"fa48d722834768085e81c545be3ea57e6100150ff87da89799a5d0bdae19c0d6"} Dec 13 06:50:55 crc kubenswrapper[5048]: I1213 06:50:55.972519 5048 generic.go:334] "Generic (PLEG): container finished" podID="d58379fb-a3bb-4853-995d-2df64fa912d4" containerID="82b7d04cc4842f3ec347f5de3aec97895d8b06b0b5c32ac67d2ef77c93994b9f" exitCode=0 Dec 13 06:50:55 crc kubenswrapper[5048]: I1213 06:50:55.972582 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-ws26t" event={"ID":"d58379fb-a3bb-4853-995d-2df64fa912d4","Type":"ContainerDied","Data":"82b7d04cc4842f3ec347f5de3aec97895d8b06b0b5c32ac67d2ef77c93994b9f"} Dec 13 06:50:55 crc kubenswrapper[5048]: I1213 06:50:55.974198 5048 generic.go:334] "Generic (PLEG): container finished" podID="c250e8c3-7775-44f8-8544-51e69bdaff07" 
containerID="509b9f55120eec3f4d51daaf8e43854a9b210d1f4e1d4f676355369fdb0f5949" exitCode=0 Dec 13 06:50:55 crc kubenswrapper[5048]: I1213 06:50:55.974234 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-cn4pn" event={"ID":"c250e8c3-7775-44f8-8544-51e69bdaff07","Type":"ContainerDied","Data":"509b9f55120eec3f4d51daaf8e43854a9b210d1f4e1d4f676355369fdb0f5949"} Dec 13 06:50:56 crc kubenswrapper[5048]: I1213 06:50:56.370602 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 13 06:50:56 crc kubenswrapper[5048]: I1213 06:50:56.371235 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="12348a4c-feba-43e8-8f4f-bd2729ccd9ab" containerName="glance-log" containerID="cri-o://480807ce5aa47b3660060c4461dcd0437d03a5d34ecc5ebadc256d917d3ed43c" gracePeriod=30 Dec 13 06:50:56 crc kubenswrapper[5048]: I1213 06:50:56.371690 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="12348a4c-feba-43e8-8f4f-bd2729ccd9ab" containerName="glance-httpd" containerID="cri-o://0918df1446a8bbc0a0db551d1e60cec00bef8b44cc388416436a46a2f719cf62" gracePeriod=30 Dec 13 06:50:56 crc kubenswrapper[5048]: I1213 06:50:56.987946 5048 generic.go:334] "Generic (PLEG): container finished" podID="12348a4c-feba-43e8-8f4f-bd2729ccd9ab" containerID="480807ce5aa47b3660060c4461dcd0437d03a5d34ecc5ebadc256d917d3ed43c" exitCode=143 Dec 13 06:50:56 crc kubenswrapper[5048]: I1213 06:50:56.988811 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"12348a4c-feba-43e8-8f4f-bd2729ccd9ab","Type":"ContainerDied","Data":"480807ce5aa47b3660060c4461dcd0437d03a5d34ecc5ebadc256d917d3ed43c"} Dec 13 06:50:56 crc kubenswrapper[5048]: I1213 06:50:56.990346 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d3148a97-28d6-4381-86c5-a8bccaaa8fc8","Type":"ContainerStarted","Data":"b0ddfdb03c5c2802527a288325d49a92001db30f1e8796423dfc1eaee77c1a45"} Dec 13 06:50:56 crc kubenswrapper[5048]: I1213 06:50:56.990452 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d3148a97-28d6-4381-86c5-a8bccaaa8fc8","Type":"ContainerStarted","Data":"c183bcaf1968701cca3713d2b37d9cbc2128f54b5bb7c77bd3141554d5389bb8"} Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.648017 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-jnf7w" Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.678900 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-f16a-account-create-update-kwscr" Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.689400 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-cn4pn" Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.694563 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-7adf-account-create-update-52mfj" Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.727157 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-081b-account-create-update-lvp4w" Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.736832 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-ws26t" Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.741000 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6vm67\" (UniqueName: \"kubernetes.io/projected/55848283-b878-4f02-b76f-aaf4ce0d6765-kube-api-access-6vm67\") pod \"55848283-b878-4f02-b76f-aaf4ce0d6765\" (UID: \"55848283-b878-4f02-b76f-aaf4ce0d6765\") " Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.741072 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55848283-b878-4f02-b76f-aaf4ce0d6765-operator-scripts\") pod \"55848283-b878-4f02-b76f-aaf4ce0d6765\" (UID: \"55848283-b878-4f02-b76f-aaf4ce0d6765\") " Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.741109 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5nkhr\" (UniqueName: \"kubernetes.io/projected/c250e8c3-7775-44f8-8544-51e69bdaff07-kube-api-access-5nkhr\") pod \"c250e8c3-7775-44f8-8544-51e69bdaff07\" (UID: \"c250e8c3-7775-44f8-8544-51e69bdaff07\") " Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.741173 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2cbf8637-a57a-4dd4-a175-6ab0886adbc2-operator-scripts\") pod \"2cbf8637-a57a-4dd4-a175-6ab0886adbc2\" (UID: \"2cbf8637-a57a-4dd4-a175-6ab0886adbc2\") " Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.744095 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/55848283-b878-4f02-b76f-aaf4ce0d6765-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "55848283-b878-4f02-b76f-aaf4ce0d6765" (UID: "55848283-b878-4f02-b76f-aaf4ce0d6765"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.745186 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2cbf8637-a57a-4dd4-a175-6ab0886adbc2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2cbf8637-a57a-4dd4-a175-6ab0886adbc2" (UID: "2cbf8637-a57a-4dd4-a175-6ab0886adbc2"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.748707 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngdq2\" (UniqueName: \"kubernetes.io/projected/030ae47b-71e7-4fb3-b518-f807e1fda0a1-kube-api-access-ngdq2\") pod \"030ae47b-71e7-4fb3-b518-f807e1fda0a1\" (UID: \"030ae47b-71e7-4fb3-b518-f807e1fda0a1\") " Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.748784 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-95sx5\" (UniqueName: \"kubernetes.io/projected/2cbf8637-a57a-4dd4-a175-6ab0886adbc2-kube-api-access-95sx5\") pod \"2cbf8637-a57a-4dd4-a175-6ab0886adbc2\" (UID: \"2cbf8637-a57a-4dd4-a175-6ab0886adbc2\") " Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.748845 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c250e8c3-7775-44f8-8544-51e69bdaff07-operator-scripts\") pod \"c250e8c3-7775-44f8-8544-51e69bdaff07\" (UID: \"c250e8c3-7775-44f8-8544-51e69bdaff07\") " Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.748918 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/030ae47b-71e7-4fb3-b518-f807e1fda0a1-operator-scripts\") pod \"030ae47b-71e7-4fb3-b518-f807e1fda0a1\" (UID: \"030ae47b-71e7-4fb3-b518-f807e1fda0a1\") " Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.749514 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c250e8c3-7775-44f8-8544-51e69bdaff07-kube-api-access-5nkhr" (OuterVolumeSpecName: "kube-api-access-5nkhr") pod "c250e8c3-7775-44f8-8544-51e69bdaff07" (UID: "c250e8c3-7775-44f8-8544-51e69bdaff07"). InnerVolumeSpecName "kube-api-access-5nkhr". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.749546 5048 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55848283-b878-4f02-b76f-aaf4ce0d6765-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.749558 5048 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2cbf8637-a57a-4dd4-a175-6ab0886adbc2-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.750216 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c250e8c3-7775-44f8-8544-51e69bdaff07-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c250e8c3-7775-44f8-8544-51e69bdaff07" (UID: "c250e8c3-7775-44f8-8544-51e69bdaff07"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.755312 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/030ae47b-71e7-4fb3-b518-f807e1fda0a1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "030ae47b-71e7-4fb3-b518-f807e1fda0a1" (UID: "030ae47b-71e7-4fb3-b518-f807e1fda0a1"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.757653 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2cbf8637-a57a-4dd4-a175-6ab0886adbc2-kube-api-access-95sx5" (OuterVolumeSpecName: "kube-api-access-95sx5") pod "2cbf8637-a57a-4dd4-a175-6ab0886adbc2" (UID: "2cbf8637-a57a-4dd4-a175-6ab0886adbc2"). InnerVolumeSpecName "kube-api-access-95sx5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.757767 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/030ae47b-71e7-4fb3-b518-f807e1fda0a1-kube-api-access-ngdq2" (OuterVolumeSpecName: "kube-api-access-ngdq2") pod "030ae47b-71e7-4fb3-b518-f807e1fda0a1" (UID: "030ae47b-71e7-4fb3-b518-f807e1fda0a1"). InnerVolumeSpecName "kube-api-access-ngdq2". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.757840 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55848283-b878-4f02-b76f-aaf4ce0d6765-kube-api-access-6vm67" (OuterVolumeSpecName: "kube-api-access-6vm67") pod "55848283-b878-4f02-b76f-aaf4ce0d6765" (UID: "55848283-b878-4f02-b76f-aaf4ce0d6765"). InnerVolumeSpecName "kube-api-access-6vm67". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.851049 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ebfb097-2554-4605-bf3c-a545907fbaa6-operator-scripts\") pod \"8ebfb097-2554-4605-bf3c-a545907fbaa6\" (UID: \"8ebfb097-2554-4605-bf3c-a545907fbaa6\") " Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.851233 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hx47g\" (UniqueName: \"kubernetes.io/projected/8ebfb097-2554-4605-bf3c-a545907fbaa6-kube-api-access-hx47g\") pod \"8ebfb097-2554-4605-bf3c-a545907fbaa6\" (UID: \"8ebfb097-2554-4605-bf3c-a545907fbaa6\") " Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.851339 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d58379fb-a3bb-4853-995d-2df64fa912d4-operator-scripts\") pod \"d58379fb-a3bb-4853-995d-2df64fa912d4\" (UID: \"d58379fb-a3bb-4853-995d-2df64fa912d4\") " Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.851368 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cjmzx\" (UniqueName: \"kubernetes.io/projected/d58379fb-a3bb-4853-995d-2df64fa912d4-kube-api-access-cjmzx\") pod \"d58379fb-a3bb-4853-995d-2df64fa912d4\" (UID: \"d58379fb-a3bb-4853-995d-2df64fa912d4\") " Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.851742 5048 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c250e8c3-7775-44f8-8544-51e69bdaff07-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.851761 5048 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/030ae47b-71e7-4fb3-b518-f807e1fda0a1-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.851770 5048 reconciler_common.go:293] "Volume detached 
for volume \"kube-api-access-6vm67\" (UniqueName: \"kubernetes.io/projected/55848283-b878-4f02-b76f-aaf4ce0d6765-kube-api-access-6vm67\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.851781 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5nkhr\" (UniqueName: \"kubernetes.io/projected/c250e8c3-7775-44f8-8544-51e69bdaff07-kube-api-access-5nkhr\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.851789 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngdq2\" (UniqueName: \"kubernetes.io/projected/030ae47b-71e7-4fb3-b518-f807e1fda0a1-kube-api-access-ngdq2\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.851797 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-95sx5\" (UniqueName: \"kubernetes.io/projected/2cbf8637-a57a-4dd4-a175-6ab0886adbc2-kube-api-access-95sx5\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.852947 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d58379fb-a3bb-4853-995d-2df64fa912d4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d58379fb-a3bb-4853-995d-2df64fa912d4" (UID: "d58379fb-a3bb-4853-995d-2df64fa912d4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.853254 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8ebfb097-2554-4605-bf3c-a545907fbaa6-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8ebfb097-2554-4605-bf3c-a545907fbaa6" (UID: "8ebfb097-2554-4605-bf3c-a545907fbaa6"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.857608 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ebfb097-2554-4605-bf3c-a545907fbaa6-kube-api-access-hx47g" (OuterVolumeSpecName: "kube-api-access-hx47g") pod "8ebfb097-2554-4605-bf3c-a545907fbaa6" (UID: "8ebfb097-2554-4605-bf3c-a545907fbaa6"). InnerVolumeSpecName "kube-api-access-hx47g". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.857809 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d58379fb-a3bb-4853-995d-2df64fa912d4-kube-api-access-cjmzx" (OuterVolumeSpecName: "kube-api-access-cjmzx") pod "d58379fb-a3bb-4853-995d-2df64fa912d4" (UID: "d58379fb-a3bb-4853-995d-2df64fa912d4"). InnerVolumeSpecName "kube-api-access-cjmzx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.952928 5048 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d58379fb-a3bb-4853-995d-2df64fa912d4-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.952959 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cjmzx\" (UniqueName: \"kubernetes.io/projected/d58379fb-a3bb-4853-995d-2df64fa912d4-kube-api-access-cjmzx\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.952968 5048 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ebfb097-2554-4605-bf3c-a545907fbaa6-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:57 crc kubenswrapper[5048]: I1213 06:50:57.952978 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hx47g\" (UniqueName: \"kubernetes.io/projected/8ebfb097-2554-4605-bf3c-a545907fbaa6-kube-api-access-hx47g\") on node \"crc\" DevicePath \"\"" Dec 13 06:50:58 crc kubenswrapper[5048]: I1213 06:50:58.002100 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d3148a97-28d6-4381-86c5-a8bccaaa8fc8","Type":"ContainerStarted","Data":"db53b2ae1f3cddf108c8d7dada3acd10e3a75b3b36edad7c996bdbd9a2d15f17"} Dec 13 06:50:58 crc kubenswrapper[5048]: I1213 06:50:58.004284 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-jnf7w" Dec 13 06:50:58 crc kubenswrapper[5048]: I1213 06:50:58.004304 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-jnf7w" event={"ID":"2cbf8637-a57a-4dd4-a175-6ab0886adbc2","Type":"ContainerDied","Data":"5ba3c6618250ebca7d8202c548d345e45d04eaa0e75028a32e3ac7fae05b06f1"} Dec 13 06:50:58 crc kubenswrapper[5048]: I1213 06:50:58.004339 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5ba3c6618250ebca7d8202c548d345e45d04eaa0e75028a32e3ac7fae05b06f1" Dec 13 06:50:58 crc kubenswrapper[5048]: I1213 06:50:58.006037 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-ws26t" event={"ID":"d58379fb-a3bb-4853-995d-2df64fa912d4","Type":"ContainerDied","Data":"23fb2eccde4d97cc65cd487e0a3bdc89456610b0b61f47fb5ba22e3c8db39c06"} Dec 13 06:50:58 crc kubenswrapper[5048]: I1213 06:50:58.006066 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-ws26t" Dec 13 06:50:58 crc kubenswrapper[5048]: I1213 06:50:58.006074 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="23fb2eccde4d97cc65cd487e0a3bdc89456610b0b61f47fb5ba22e3c8db39c06" Dec 13 06:50:58 crc kubenswrapper[5048]: I1213 06:50:58.008982 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-cn4pn" event={"ID":"c250e8c3-7775-44f8-8544-51e69bdaff07","Type":"ContainerDied","Data":"a7710b0f5e14417253f1f5d7aab39cf7d9832a53ce7713dea566acf3cf919804"} Dec 13 06:50:58 crc kubenswrapper[5048]: I1213 06:50:58.009171 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a7710b0f5e14417253f1f5d7aab39cf7d9832a53ce7713dea566acf3cf919804" Dec 13 06:50:58 crc kubenswrapper[5048]: I1213 06:50:58.009026 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-cn4pn" Dec 13 06:50:58 crc kubenswrapper[5048]: I1213 06:50:58.012343 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-081b-account-create-update-lvp4w" event={"ID":"8ebfb097-2554-4605-bf3c-a545907fbaa6","Type":"ContainerDied","Data":"99e960204950aab39b698e30feed01b228d877d1629e1b5a77c020b022abf650"} Dec 13 06:50:58 crc kubenswrapper[5048]: I1213 06:50:58.012553 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="99e960204950aab39b698e30feed01b228d877d1629e1b5a77c020b022abf650" Dec 13 06:50:58 crc kubenswrapper[5048]: I1213 06:50:58.012357 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-081b-account-create-update-lvp4w" Dec 13 06:50:58 crc kubenswrapper[5048]: I1213 06:50:58.014066 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-7adf-account-create-update-52mfj" event={"ID":"030ae47b-71e7-4fb3-b518-f807e1fda0a1","Type":"ContainerDied","Data":"d6bd46ff906fce7e69b1896f50188b35df1432279ed7969a9b333bd27fc36bf3"} Dec 13 06:50:58 crc kubenswrapper[5048]: I1213 06:50:58.014100 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-7adf-account-create-update-52mfj" Dec 13 06:50:58 crc kubenswrapper[5048]: I1213 06:50:58.014114 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d6bd46ff906fce7e69b1896f50188b35df1432279ed7969a9b333bd27fc36bf3" Dec 13 06:50:58 crc kubenswrapper[5048]: I1213 06:50:58.015919 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-f16a-account-create-update-kwscr" event={"ID":"55848283-b878-4f02-b76f-aaf4ce0d6765","Type":"ContainerDied","Data":"68ac5be8cfdbc0a27775026e83afa84467a1570b9541fa373cba6db4c29b39c1"} Dec 13 06:50:58 crc kubenswrapper[5048]: I1213 06:50:58.015942 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="68ac5be8cfdbc0a27775026e83afa84467a1570b9541fa373cba6db4c29b39c1" Dec 13 06:50:58 crc kubenswrapper[5048]: I1213 06:50:58.015953 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-f16a-account-create-update-kwscr" Dec 13 06:50:58 crc kubenswrapper[5048]: I1213 06:50:58.204952 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 13 06:50:58 crc kubenswrapper[5048]: I1213 06:50:58.205222 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="ce81fe6a-857c-4bcd-ae82-b7a6f0280326" containerName="glance-log" containerID="cri-o://6a29f4af7eda17ec5252628950659560cb241ad301e31efa51a483f5ad18d122" gracePeriod=30 Dec 13 06:50:58 crc kubenswrapper[5048]: I1213 06:50:58.205421 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="ce81fe6a-857c-4bcd-ae82-b7a6f0280326" containerName="glance-httpd" containerID="cri-o://edcbf41f27739464565246ff6ee05b468e763fb695031f3c1e230ea539bc72a2" gracePeriod=30 Dec 13 06:50:59 crc kubenswrapper[5048]: I1213 06:50:59.026651 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d3148a97-28d6-4381-86c5-a8bccaaa8fc8","Type":"ContainerStarted","Data":"69a204dbae34c7c921e80dea198cf5356275aa66a91ab99fdec8e52b634cb0ff"} Dec 13 06:50:59 crc kubenswrapper[5048]: I1213 06:50:59.026810 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d3148a97-28d6-4381-86c5-a8bccaaa8fc8" containerName="proxy-httpd" containerID="cri-o://69a204dbae34c7c921e80dea198cf5356275aa66a91ab99fdec8e52b634cb0ff" gracePeriod=30 Dec 13 06:50:59 crc kubenswrapper[5048]: I1213 06:50:59.026783 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d3148a97-28d6-4381-86c5-a8bccaaa8fc8" containerName="ceilometer-central-agent" containerID="cri-o://c183bcaf1968701cca3713d2b37d9cbc2128f54b5bb7c77bd3141554d5389bb8" gracePeriod=30 Dec 13 06:50:59 crc kubenswrapper[5048]: I1213 06:50:59.027136 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 13 06:50:59 crc kubenswrapper[5048]: I1213 06:50:59.026860 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d3148a97-28d6-4381-86c5-a8bccaaa8fc8" containerName="sg-core" containerID="cri-o://db53b2ae1f3cddf108c8d7dada3acd10e3a75b3b36edad7c996bdbd9a2d15f17" gracePeriod=30 Dec 13 06:50:59 crc kubenswrapper[5048]: I1213 06:50:59.026906 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d3148a97-28d6-4381-86c5-a8bccaaa8fc8" containerName="ceilometer-notification-agent" containerID="cri-o://b0ddfdb03c5c2802527a288325d49a92001db30f1e8796423dfc1eaee77c1a45" gracePeriod=30 Dec 13 06:50:59 crc kubenswrapper[5048]: I1213 06:50:59.032970 5048 generic.go:334] "Generic (PLEG): container finished" podID="ce81fe6a-857c-4bcd-ae82-b7a6f0280326" containerID="6a29f4af7eda17ec5252628950659560cb241ad301e31efa51a483f5ad18d122" exitCode=143 Dec 13 06:50:59 crc kubenswrapper[5048]: I1213 06:50:59.033012 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ce81fe6a-857c-4bcd-ae82-b7a6f0280326","Type":"ContainerDied","Data":"6a29f4af7eda17ec5252628950659560cb241ad301e31efa51a483f5ad18d122"} Dec 13 06:50:59 crc kubenswrapper[5048]: I1213 06:50:59.066390 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/ceilometer-0" podStartSLOduration=1.5328380099999999 podStartE2EDuration="5.06636624s" podCreationTimestamp="2025-12-13 06:50:54 +0000 UTC" firstStartedPulling="2025-12-13 06:50:55.133042167 +0000 UTC m=+1288.999636748" lastFinishedPulling="2025-12-13 06:50:58.666570397 +0000 UTC m=+1292.533164978" observedRunningTime="2025-12-13 06:50:59.056018828 +0000 UTC m=+1292.922613409" watchObservedRunningTime="2025-12-13 06:50:59.06636624 +0000 UTC m=+1292.932960821" Dec 13 06:51:00 crc kubenswrapper[5048]: I1213 06:51:00.043686 5048 generic.go:334] "Generic (PLEG): container finished" podID="d3148a97-28d6-4381-86c5-a8bccaaa8fc8" containerID="69a204dbae34c7c921e80dea198cf5356275aa66a91ab99fdec8e52b634cb0ff" exitCode=0 Dec 13 06:51:00 crc kubenswrapper[5048]: I1213 06:51:00.043955 5048 generic.go:334] "Generic (PLEG): container finished" podID="d3148a97-28d6-4381-86c5-a8bccaaa8fc8" containerID="db53b2ae1f3cddf108c8d7dada3acd10e3a75b3b36edad7c996bdbd9a2d15f17" exitCode=2 Dec 13 06:51:00 crc kubenswrapper[5048]: I1213 06:51:00.043761 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d3148a97-28d6-4381-86c5-a8bccaaa8fc8","Type":"ContainerDied","Data":"69a204dbae34c7c921e80dea198cf5356275aa66a91ab99fdec8e52b634cb0ff"} Dec 13 06:51:00 crc kubenswrapper[5048]: I1213 06:51:00.044030 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d3148a97-28d6-4381-86c5-a8bccaaa8fc8","Type":"ContainerDied","Data":"db53b2ae1f3cddf108c8d7dada3acd10e3a75b3b36edad7c996bdbd9a2d15f17"} Dec 13 06:51:00 crc kubenswrapper[5048]: I1213 06:51:00.680620 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-6c7b8f495-lq789" Dec 13 06:51:00 crc kubenswrapper[5048]: I1213 06:51:00.682513 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-6c7b8f495-lq789" Dec 13 06:51:01 crc kubenswrapper[5048]: I1213 06:51:01.059116 5048 generic.go:334] "Generic (PLEG): container finished" podID="d3148a97-28d6-4381-86c5-a8bccaaa8fc8" containerID="b0ddfdb03c5c2802527a288325d49a92001db30f1e8796423dfc1eaee77c1a45" exitCode=0 Dec 13 06:51:01 crc kubenswrapper[5048]: I1213 06:51:01.059168 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d3148a97-28d6-4381-86c5-a8bccaaa8fc8","Type":"ContainerDied","Data":"b0ddfdb03c5c2802527a288325d49a92001db30f1e8796423dfc1eaee77c1a45"} Dec 13 06:51:01 crc kubenswrapper[5048]: I1213 06:51:01.063121 5048 generic.go:334] "Generic (PLEG): container finished" podID="12348a4c-feba-43e8-8f4f-bd2729ccd9ab" containerID="0918df1446a8bbc0a0db551d1e60cec00bef8b44cc388416436a46a2f719cf62" exitCode=0 Dec 13 06:51:01 crc kubenswrapper[5048]: I1213 06:51:01.063196 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"12348a4c-feba-43e8-8f4f-bd2729ccd9ab","Type":"ContainerDied","Data":"0918df1446a8bbc0a0db551d1e60cec00bef8b44cc388416436a46a2f719cf62"} Dec 13 06:51:02 crc kubenswrapper[5048]: I1213 06:51:02.072795 5048 generic.go:334] "Generic (PLEG): container finished" podID="d3148a97-28d6-4381-86c5-a8bccaaa8fc8" containerID="c183bcaf1968701cca3713d2b37d9cbc2128f54b5bb7c77bd3141554d5389bb8" exitCode=0 Dec 13 06:51:02 crc kubenswrapper[5048]: I1213 06:51:02.072843 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"d3148a97-28d6-4381-86c5-a8bccaaa8fc8","Type":"ContainerDied","Data":"c183bcaf1968701cca3713d2b37d9cbc2128f54b5bb7c77bd3141554d5389bb8"} Dec 13 06:51:02 crc kubenswrapper[5048]: I1213 06:51:02.810950 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 13 06:51:02 crc kubenswrapper[5048]: I1213 06:51:02.817706 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 13 06:51:02 crc kubenswrapper[5048]: I1213 06:51:02.841077 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-scripts\") pod \"d3148a97-28d6-4381-86c5-a8bccaaa8fc8\" (UID: \"d3148a97-28d6-4381-86c5-a8bccaaa8fc8\") " Dec 13 06:51:02 crc kubenswrapper[5048]: I1213 06:51:02.841513 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-run-httpd\") pod \"d3148a97-28d6-4381-86c5-a8bccaaa8fc8\" (UID: \"d3148a97-28d6-4381-86c5-a8bccaaa8fc8\") " Dec 13 06:51:02 crc kubenswrapper[5048]: I1213 06:51:02.841575 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xfknn\" (UniqueName: \"kubernetes.io/projected/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-kube-api-access-xfknn\") pod \"d3148a97-28d6-4381-86c5-a8bccaaa8fc8\" (UID: \"d3148a97-28d6-4381-86c5-a8bccaaa8fc8\") " Dec 13 06:51:02 crc kubenswrapper[5048]: I1213 06:51:02.841680 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-config-data\") pod \"d3148a97-28d6-4381-86c5-a8bccaaa8fc8\" (UID: \"d3148a97-28d6-4381-86c5-a8bccaaa8fc8\") " Dec 13 06:51:02 crc kubenswrapper[5048]: I1213 06:51:02.841792 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "d3148a97-28d6-4381-86c5-a8bccaaa8fc8" (UID: "d3148a97-28d6-4381-86c5-a8bccaaa8fc8"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:51:02 crc kubenswrapper[5048]: I1213 06:51:02.841838 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-sg-core-conf-yaml\") pod \"d3148a97-28d6-4381-86c5-a8bccaaa8fc8\" (UID: \"d3148a97-28d6-4381-86c5-a8bccaaa8fc8\") " Dec 13 06:51:02 crc kubenswrapper[5048]: I1213 06:51:02.841866 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-log-httpd\") pod \"d3148a97-28d6-4381-86c5-a8bccaaa8fc8\" (UID: \"d3148a97-28d6-4381-86c5-a8bccaaa8fc8\") " Dec 13 06:51:02 crc kubenswrapper[5048]: I1213 06:51:02.841892 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-combined-ca-bundle\") pod \"d3148a97-28d6-4381-86c5-a8bccaaa8fc8\" (UID: \"d3148a97-28d6-4381-86c5-a8bccaaa8fc8\") " Dec 13 06:51:02 crc kubenswrapper[5048]: I1213 06:51:02.843158 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "d3148a97-28d6-4381-86c5-a8bccaaa8fc8" (UID: "d3148a97-28d6-4381-86c5-a8bccaaa8fc8"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:51:02 crc kubenswrapper[5048]: I1213 06:51:02.851571 5048 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:02 crc kubenswrapper[5048]: I1213 06:51:02.851614 5048 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:02 crc kubenswrapper[5048]: I1213 06:51:02.860048 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-kube-api-access-xfknn" (OuterVolumeSpecName: "kube-api-access-xfknn") pod "d3148a97-28d6-4381-86c5-a8bccaaa8fc8" (UID: "d3148a97-28d6-4381-86c5-a8bccaaa8fc8"). InnerVolumeSpecName "kube-api-access-xfknn". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:51:02 crc kubenswrapper[5048]: I1213 06:51:02.878427 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "d3148a97-28d6-4381-86c5-a8bccaaa8fc8" (UID: "d3148a97-28d6-4381-86c5-a8bccaaa8fc8"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:51:02 crc kubenswrapper[5048]: I1213 06:51:02.880136 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-scripts" (OuterVolumeSpecName: "scripts") pod "d3148a97-28d6-4381-86c5-a8bccaaa8fc8" (UID: "d3148a97-28d6-4381-86c5-a8bccaaa8fc8"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:51:02 crc kubenswrapper[5048]: I1213 06:51:02.960917 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\" (UID: \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\") " Dec 13 06:51:02 crc kubenswrapper[5048]: I1213 06:51:02.961892 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-httpd-run\") pod \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\" (UID: \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\") " Dec 13 06:51:02 crc kubenswrapper[5048]: I1213 06:51:02.961947 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-combined-ca-bundle\") pod \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\" (UID: \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\") " Dec 13 06:51:02 crc kubenswrapper[5048]: I1213 06:51:02.962001 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-config-data\") pod \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\" (UID: \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\") " Dec 13 06:51:02 crc kubenswrapper[5048]: I1213 06:51:02.962053 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bqcdl\" (UniqueName: \"kubernetes.io/projected/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-kube-api-access-bqcdl\") pod \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\" (UID: \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\") " Dec 13 06:51:02 crc kubenswrapper[5048]: I1213 06:51:02.962074 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-scripts\") pod \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\" (UID: \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\") " Dec 13 06:51:02 crc kubenswrapper[5048]: I1213 06:51:02.962101 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-internal-tls-certs\") pod \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\" (UID: \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\") " Dec 13 06:51:02 crc kubenswrapper[5048]: I1213 06:51:02.962179 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-logs\") pod \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\" (UID: \"12348a4c-feba-43e8-8f4f-bd2729ccd9ab\") " Dec 13 06:51:02 crc kubenswrapper[5048]: I1213 06:51:02.962772 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xfknn\" (UniqueName: \"kubernetes.io/projected/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-kube-api-access-xfknn\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:02 crc kubenswrapper[5048]: I1213 06:51:02.962788 5048 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:02 crc kubenswrapper[5048]: I1213 06:51:02.962798 5048 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:02 crc kubenswrapper[5048]: I1213 06:51:02.963840 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "12348a4c-feba-43e8-8f4f-bd2729ccd9ab" (UID: "12348a4c-feba-43e8-8f4f-bd2729ccd9ab"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:51:02 crc kubenswrapper[5048]: I1213 06:51:02.963917 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-logs" (OuterVolumeSpecName: "logs") pod "12348a4c-feba-43e8-8f4f-bd2729ccd9ab" (UID: "12348a4c-feba-43e8-8f4f-bd2729ccd9ab"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:51:02 crc kubenswrapper[5048]: I1213 06:51:02.965459 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "glance") pod "12348a4c-feba-43e8-8f4f-bd2729ccd9ab" (UID: "12348a4c-feba-43e8-8f4f-bd2729ccd9ab"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 13 06:51:02 crc kubenswrapper[5048]: I1213 06:51:02.968165 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-scripts" (OuterVolumeSpecName: "scripts") pod "12348a4c-feba-43e8-8f4f-bd2729ccd9ab" (UID: "12348a4c-feba-43e8-8f4f-bd2729ccd9ab"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:51:02 crc kubenswrapper[5048]: I1213 06:51:02.973192 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-kube-api-access-bqcdl" (OuterVolumeSpecName: "kube-api-access-bqcdl") pod "12348a4c-feba-43e8-8f4f-bd2729ccd9ab" (UID: "12348a4c-feba-43e8-8f4f-bd2729ccd9ab"). InnerVolumeSpecName "kube-api-access-bqcdl". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:51:02 crc kubenswrapper[5048]: I1213 06:51:02.998040 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d3148a97-28d6-4381-86c5-a8bccaaa8fc8" (UID: "d3148a97-28d6-4381-86c5-a8bccaaa8fc8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.011019 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "12348a4c-feba-43e8-8f4f-bd2729ccd9ab" (UID: "12348a4c-feba-43e8-8f4f-bd2729ccd9ab"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.012004 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-config-data" (OuterVolumeSpecName: "config-data") pod "d3148a97-28d6-4381-86c5-a8bccaaa8fc8" (UID: "d3148a97-28d6-4381-86c5-a8bccaaa8fc8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.035068 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "12348a4c-feba-43e8-8f4f-bd2729ccd9ab" (UID: "12348a4c-feba-43e8-8f4f-bd2729ccd9ab"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.035602 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-config-data" (OuterVolumeSpecName: "config-data") pod "12348a4c-feba-43e8-8f4f-bd2729ccd9ab" (UID: "12348a4c-feba-43e8-8f4f-bd2729ccd9ab"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.065400 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-config-data\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.065457 5048 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-logs\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.065499 5048 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.065509 5048 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-httpd-run\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.065518 5048 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3148a97-28d6-4381-86c5-a8bccaaa8fc8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.065528 5048 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.065536 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-config-data\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.065547 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bqcdl\" (UniqueName: \"kubernetes.io/projected/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-kube-api-access-bqcdl\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.065555 5048 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.065563 5048 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/12348a4c-feba-43e8-8f4f-bd2729ccd9ab-internal-tls-certs\") on node \"crc\" DevicePath 
\"\"" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.078088 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.084572 5048 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.086731 5048 generic.go:334] "Generic (PLEG): container finished" podID="ce81fe6a-857c-4bcd-ae82-b7a6f0280326" containerID="edcbf41f27739464565246ff6ee05b468e763fb695031f3c1e230ea539bc72a2" exitCode=0 Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.086792 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ce81fe6a-857c-4bcd-ae82-b7a6f0280326","Type":"ContainerDied","Data":"edcbf41f27739464565246ff6ee05b468e763fb695031f3c1e230ea539bc72a2"} Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.086826 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ce81fe6a-857c-4bcd-ae82-b7a6f0280326","Type":"ContainerDied","Data":"2d908f1317d286b8cd259d0be5c9e39bbe338024d2c2820e373066abd4ff061f"} Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.086847 5048 scope.go:117] "RemoveContainer" containerID="edcbf41f27739464565246ff6ee05b468e763fb695031f3c1e230ea539bc72a2" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.087020 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.089676 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"12348a4c-feba-43e8-8f4f-bd2729ccd9ab","Type":"ContainerDied","Data":"7aac0eac05f44ec94abd6a1fbc3a78b63b7f0ff8e37c36b7e92faf8ab69708d2"} Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.089761 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.142132 5048 scope.go:117] "RemoveContainer" containerID="6a29f4af7eda17ec5252628950659560cb241ad301e31efa51a483f5ad18d122" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.153647 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d3148a97-28d6-4381-86c5-a8bccaaa8fc8","Type":"ContainerDied","Data":"9a1110ea29559ba0a506f2f9ef843f9e4ad097795235e4f18d665d4a3bbe369e"} Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.161736 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.167622 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-scripts\") pod \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\" (UID: \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\") " Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.167730 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-combined-ca-bundle\") pod \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\" (UID: \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\") " Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.167753 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-config-data\") pod \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\" (UID: \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\") " Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.167804 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xd76c\" (UniqueName: \"kubernetes.io/projected/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-kube-api-access-xd76c\") pod \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\" (UID: \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\") " Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.167838 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-logs\") pod \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\" (UID: \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\") " Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.167858 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\" (UID: \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\") " Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.167881 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-public-tls-certs\") pod \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\" (UID: \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\") " Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.168021 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-httpd-run\") pod \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\" (UID: \"ce81fe6a-857c-4bcd-ae82-b7a6f0280326\") " Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.168689 5048 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.169380 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "ce81fe6a-857c-4bcd-ae82-b7a6f0280326" (UID: "ce81fe6a-857c-4bcd-ae82-b7a6f0280326"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.171518 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-logs" (OuterVolumeSpecName: "logs") pod "ce81fe6a-857c-4bcd-ae82-b7a6f0280326" (UID: "ce81fe6a-857c-4bcd-ae82-b7a6f0280326"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.180783 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-scripts" (OuterVolumeSpecName: "scripts") pod "ce81fe6a-857c-4bcd-ae82-b7a6f0280326" (UID: "ce81fe6a-857c-4bcd-ae82-b7a6f0280326"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.184825 5048 scope.go:117] "RemoveContainer" containerID="edcbf41f27739464565246ff6ee05b468e763fb695031f3c1e230ea539bc72a2" Dec 13 06:51:03 crc kubenswrapper[5048]: E1213 06:51:03.185348 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"edcbf41f27739464565246ff6ee05b468e763fb695031f3c1e230ea539bc72a2\": container with ID starting with edcbf41f27739464565246ff6ee05b468e763fb695031f3c1e230ea539bc72a2 not found: ID does not exist" containerID="edcbf41f27739464565246ff6ee05b468e763fb695031f3c1e230ea539bc72a2" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.185477 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"edcbf41f27739464565246ff6ee05b468e763fb695031f3c1e230ea539bc72a2"} err="failed to get container status \"edcbf41f27739464565246ff6ee05b468e763fb695031f3c1e230ea539bc72a2\": rpc error: code = NotFound desc = could not find container \"edcbf41f27739464565246ff6ee05b468e763fb695031f3c1e230ea539bc72a2\": container with ID starting with edcbf41f27739464565246ff6ee05b468e763fb695031f3c1e230ea539bc72a2 not found: ID does not exist" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.185512 5048 scope.go:117] "RemoveContainer" containerID="6a29f4af7eda17ec5252628950659560cb241ad301e31efa51a483f5ad18d122" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.185703 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-kube-api-access-xd76c" (OuterVolumeSpecName: "kube-api-access-xd76c") pod "ce81fe6a-857c-4bcd-ae82-b7a6f0280326" (UID: "ce81fe6a-857c-4bcd-ae82-b7a6f0280326"). InnerVolumeSpecName "kube-api-access-xd76c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:51:03 crc kubenswrapper[5048]: E1213 06:51:03.187089 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6a29f4af7eda17ec5252628950659560cb241ad301e31efa51a483f5ad18d122\": container with ID starting with 6a29f4af7eda17ec5252628950659560cb241ad301e31efa51a483f5ad18d122 not found: ID does not exist" containerID="6a29f4af7eda17ec5252628950659560cb241ad301e31efa51a483f5ad18d122" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.187234 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6a29f4af7eda17ec5252628950659560cb241ad301e31efa51a483f5ad18d122"} err="failed to get container status \"6a29f4af7eda17ec5252628950659560cb241ad301e31efa51a483f5ad18d122\": rpc error: code = NotFound desc = could not find container \"6a29f4af7eda17ec5252628950659560cb241ad301e31efa51a483f5ad18d122\": container with ID starting with 6a29f4af7eda17ec5252628950659560cb241ad301e31efa51a483f5ad18d122 not found: ID does not exist" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.187269 5048 scope.go:117] "RemoveContainer" containerID="0918df1446a8bbc0a0db551d1e60cec00bef8b44cc388416436a46a2f719cf62" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.191420 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "ce81fe6a-857c-4bcd-ae82-b7a6f0280326" (UID: "ce81fe6a-857c-4bcd-ae82-b7a6f0280326"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.246568 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.248213 5048 scope.go:117] "RemoveContainer" containerID="480807ce5aa47b3660060c4461dcd0437d03a5d34ecc5ebadc256d917d3ed43c" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.256568 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.257729 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ce81fe6a-857c-4bcd-ae82-b7a6f0280326" (UID: "ce81fe6a-857c-4bcd-ae82-b7a6f0280326"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.271389 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-config-data" (OuterVolumeSpecName: "config-data") pod "ce81fe6a-857c-4bcd-ae82-b7a6f0280326" (UID: "ce81fe6a-857c-4bcd-ae82-b7a6f0280326"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.271461 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 13 06:51:03 crc kubenswrapper[5048]: E1213 06:51:03.271836 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce81fe6a-857c-4bcd-ae82-b7a6f0280326" containerName="glance-log" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.271854 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce81fe6a-857c-4bcd-ae82-b7a6f0280326" containerName="glance-log" Dec 13 06:51:03 crc kubenswrapper[5048]: E1213 06:51:03.271868 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3148a97-28d6-4381-86c5-a8bccaaa8fc8" containerName="ceilometer-notification-agent" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.271875 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3148a97-28d6-4381-86c5-a8bccaaa8fc8" containerName="ceilometer-notification-agent" Dec 13 06:51:03 crc kubenswrapper[5048]: E1213 06:51:03.271888 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3148a97-28d6-4381-86c5-a8bccaaa8fc8" containerName="ceilometer-central-agent" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.271894 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3148a97-28d6-4381-86c5-a8bccaaa8fc8" containerName="ceilometer-central-agent" Dec 13 06:51:03 crc kubenswrapper[5048]: E1213 06:51:03.271906 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2cbf8637-a57a-4dd4-a175-6ab0886adbc2" containerName="mariadb-database-create" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.271911 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="2cbf8637-a57a-4dd4-a175-6ab0886adbc2" containerName="mariadb-database-create" Dec 13 06:51:03 crc kubenswrapper[5048]: E1213 06:51:03.271924 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c250e8c3-7775-44f8-8544-51e69bdaff07" containerName="mariadb-database-create" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.271930 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="c250e8c3-7775-44f8-8544-51e69bdaff07" containerName="mariadb-database-create" Dec 13 06:51:03 crc kubenswrapper[5048]: E1213 06:51:03.271945 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d58379fb-a3bb-4853-995d-2df64fa912d4" containerName="mariadb-database-create" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.271951 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="d58379fb-a3bb-4853-995d-2df64fa912d4" containerName="mariadb-database-create" Dec 13 06:51:03 crc kubenswrapper[5048]: E1213 06:51:03.271965 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="030ae47b-71e7-4fb3-b518-f807e1fda0a1" containerName="mariadb-account-create-update" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.271990 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="030ae47b-71e7-4fb3-b518-f807e1fda0a1" containerName="mariadb-account-create-update" Dec 13 06:51:03 crc kubenswrapper[5048]: E1213 06:51:03.272000 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12348a4c-feba-43e8-8f4f-bd2729ccd9ab" containerName="glance-log" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.272006 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="12348a4c-feba-43e8-8f4f-bd2729ccd9ab" containerName="glance-log" Dec 13 06:51:03 crc kubenswrapper[5048]: E1213 
06:51:03.272023 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce81fe6a-857c-4bcd-ae82-b7a6f0280326" containerName="glance-httpd" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.272028 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce81fe6a-857c-4bcd-ae82-b7a6f0280326" containerName="glance-httpd" Dec 13 06:51:03 crc kubenswrapper[5048]: E1213 06:51:03.272037 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12348a4c-feba-43e8-8f4f-bd2729ccd9ab" containerName="glance-httpd" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.272044 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="12348a4c-feba-43e8-8f4f-bd2729ccd9ab" containerName="glance-httpd" Dec 13 06:51:03 crc kubenswrapper[5048]: E1213 06:51:03.272052 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ebfb097-2554-4605-bf3c-a545907fbaa6" containerName="mariadb-account-create-update" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.272057 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ebfb097-2554-4605-bf3c-a545907fbaa6" containerName="mariadb-account-create-update" Dec 13 06:51:03 crc kubenswrapper[5048]: E1213 06:51:03.272541 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55848283-b878-4f02-b76f-aaf4ce0d6765" containerName="mariadb-account-create-update" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.272552 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="55848283-b878-4f02-b76f-aaf4ce0d6765" containerName="mariadb-account-create-update" Dec 13 06:51:03 crc kubenswrapper[5048]: E1213 06:51:03.272567 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3148a97-28d6-4381-86c5-a8bccaaa8fc8" containerName="sg-core" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.272573 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3148a97-28d6-4381-86c5-a8bccaaa8fc8" containerName="sg-core" Dec 13 06:51:03 crc kubenswrapper[5048]: E1213 06:51:03.272582 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3148a97-28d6-4381-86c5-a8bccaaa8fc8" containerName="proxy-httpd" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.272588 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3148a97-28d6-4381-86c5-a8bccaaa8fc8" containerName="proxy-httpd" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.272781 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="030ae47b-71e7-4fb3-b518-f807e1fda0a1" containerName="mariadb-account-create-update" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.272795 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ebfb097-2554-4605-bf3c-a545907fbaa6" containerName="mariadb-account-create-update" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.272804 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3148a97-28d6-4381-86c5-a8bccaaa8fc8" containerName="ceilometer-notification-agent" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.272814 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce81fe6a-857c-4bcd-ae82-b7a6f0280326" containerName="glance-httpd" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.272821 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="12348a4c-feba-43e8-8f4f-bd2729ccd9ab" containerName="glance-log" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.272832 5048 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="55848283-b878-4f02-b76f-aaf4ce0d6765" containerName="mariadb-account-create-update" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.272844 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3148a97-28d6-4381-86c5-a8bccaaa8fc8" containerName="ceilometer-central-agent" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.272852 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="c250e8c3-7775-44f8-8544-51e69bdaff07" containerName="mariadb-database-create" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.272864 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3148a97-28d6-4381-86c5-a8bccaaa8fc8" containerName="sg-core" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.272875 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="d58379fb-a3bb-4853-995d-2df64fa912d4" containerName="mariadb-database-create" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.272885 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce81fe6a-857c-4bcd-ae82-b7a6f0280326" containerName="glance-log" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.272892 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="12348a4c-feba-43e8-8f4f-bd2729ccd9ab" containerName="glance-httpd" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.272899 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="2cbf8637-a57a-4dd4-a175-6ab0886adbc2" containerName="mariadb-database-create" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.272908 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3148a97-28d6-4381-86c5-a8bccaaa8fc8" containerName="proxy-httpd" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.274226 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.275051 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.275307 5048 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-httpd-run\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.275355 5048 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.275366 5048 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.275375 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-config-data\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.275384 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xd76c\" (UniqueName: \"kubernetes.io/projected/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-kube-api-access-xd76c\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.275394 5048 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-logs\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.275415 5048 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.276623 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.277490 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.287567 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.296304 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.313506 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.315614 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.322747 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.323087 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.330505 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.341564 5048 scope.go:117] "RemoveContainer" containerID="69a204dbae34c7c921e80dea198cf5356275aa66a91ab99fdec8e52b634cb0ff" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.352206 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "ce81fe6a-857c-4bcd-ae82-b7a6f0280326" (UID: "ce81fe6a-857c-4bcd-ae82-b7a6f0280326"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.352284 5048 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.374727 5048 scope.go:117] "RemoveContainer" containerID="db53b2ae1f3cddf108c8d7dada3acd10e3a75b3b36edad7c996bdbd9a2d15f17" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.377204 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be68c40d-a83c-40f4-ab3b-4f50f64aae15-config-data\") pod \"glance-default-internal-api-0\" (UID: \"be68c40d-a83c-40f4-ab3b-4f50f64aae15\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.377238 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7lbcv\" (UniqueName: \"kubernetes.io/projected/7b85c5d6-33f1-4b63-be75-c600247ecd3e-kube-api-access-7lbcv\") pod \"ceilometer-0\" (UID: \"7b85c5d6-33f1-4b63-be75-c600247ecd3e\") " pod="openstack/ceilometer-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.377263 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be68c40d-a83c-40f4-ab3b-4f50f64aae15-scripts\") pod \"glance-default-internal-api-0\" (UID: \"be68c40d-a83c-40f4-ab3b-4f50f64aae15\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.377293 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/be68c40d-a83c-40f4-ab3b-4f50f64aae15-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"be68c40d-a83c-40f4-ab3b-4f50f64aae15\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.377475 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hwhwp\" (UniqueName: \"kubernetes.io/projected/be68c40d-a83c-40f4-ab3b-4f50f64aae15-kube-api-access-hwhwp\") pod \"glance-default-internal-api-0\" (UID: \"be68c40d-a83c-40f4-ab3b-4f50f64aae15\") " 
pod="openstack/glance-default-internal-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.377551 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7b85c5d6-33f1-4b63-be75-c600247ecd3e-log-httpd\") pod \"ceilometer-0\" (UID: \"7b85c5d6-33f1-4b63-be75-c600247ecd3e\") " pod="openstack/ceilometer-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.377681 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"be68c40d-a83c-40f4-ab3b-4f50f64aae15\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.377717 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7b85c5d6-33f1-4b63-be75-c600247ecd3e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7b85c5d6-33f1-4b63-be75-c600247ecd3e\") " pod="openstack/ceilometer-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.377760 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7b85c5d6-33f1-4b63-be75-c600247ecd3e-run-httpd\") pod \"ceilometer-0\" (UID: \"7b85c5d6-33f1-4b63-be75-c600247ecd3e\") " pod="openstack/ceilometer-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.378028 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b85c5d6-33f1-4b63-be75-c600247ecd3e-scripts\") pod \"ceilometer-0\" (UID: \"7b85c5d6-33f1-4b63-be75-c600247ecd3e\") " pod="openstack/ceilometer-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.378058 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b85c5d6-33f1-4b63-be75-c600247ecd3e-config-data\") pod \"ceilometer-0\" (UID: \"7b85c5d6-33f1-4b63-be75-c600247ecd3e\") " pod="openstack/ceilometer-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.378129 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/be68c40d-a83c-40f4-ab3b-4f50f64aae15-logs\") pod \"glance-default-internal-api-0\" (UID: \"be68c40d-a83c-40f4-ab3b-4f50f64aae15\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.378183 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b85c5d6-33f1-4b63-be75-c600247ecd3e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7b85c5d6-33f1-4b63-be75-c600247ecd3e\") " pod="openstack/ceilometer-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.378225 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be68c40d-a83c-40f4-ab3b-4f50f64aae15-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"be68c40d-a83c-40f4-ab3b-4f50f64aae15\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.378250 5048 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/be68c40d-a83c-40f4-ab3b-4f50f64aae15-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"be68c40d-a83c-40f4-ab3b-4f50f64aae15\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.378303 5048 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce81fe6a-857c-4bcd-ae82-b7a6f0280326-public-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.378315 5048 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.398236 5048 scope.go:117] "RemoveContainer" containerID="b0ddfdb03c5c2802527a288325d49a92001db30f1e8796423dfc1eaee77c1a45" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.427068 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.432723 5048 scope.go:117] "RemoveContainer" containerID="c183bcaf1968701cca3713d2b37d9cbc2128f54b5bb7c77bd3141554d5389bb8" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.443193 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.456032 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.457873 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.462058 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.462309 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.466321 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.480671 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b85c5d6-33f1-4b63-be75-c600247ecd3e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7b85c5d6-33f1-4b63-be75-c600247ecd3e\") " pod="openstack/ceilometer-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.480719 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"c33951ff-f856-420b-90e1-6f776931b17e\") " pod="openstack/glance-default-external-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.480746 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be68c40d-a83c-40f4-ab3b-4f50f64aae15-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"be68c40d-a83c-40f4-ab3b-4f50f64aae15\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.480763 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t8b6z\" (UniqueName: \"kubernetes.io/projected/c33951ff-f856-420b-90e1-6f776931b17e-kube-api-access-t8b6z\") pod \"glance-default-external-api-0\" (UID: \"c33951ff-f856-420b-90e1-6f776931b17e\") " pod="openstack/glance-default-external-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.480788 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/be68c40d-a83c-40f4-ab3b-4f50f64aae15-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"be68c40d-a83c-40f4-ab3b-4f50f64aae15\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.480809 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c33951ff-f856-420b-90e1-6f776931b17e-logs\") pod \"glance-default-external-api-0\" (UID: \"c33951ff-f856-420b-90e1-6f776931b17e\") " pod="openstack/glance-default-external-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.480826 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be68c40d-a83c-40f4-ab3b-4f50f64aae15-config-data\") pod \"glance-default-internal-api-0\" (UID: \"be68c40d-a83c-40f4-ab3b-4f50f64aae15\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.480842 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7lbcv\" (UniqueName: 
\"kubernetes.io/projected/7b85c5d6-33f1-4b63-be75-c600247ecd3e-kube-api-access-7lbcv\") pod \"ceilometer-0\" (UID: \"7b85c5d6-33f1-4b63-be75-c600247ecd3e\") " pod="openstack/ceilometer-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.480857 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c33951ff-f856-420b-90e1-6f776931b17e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c33951ff-f856-420b-90e1-6f776931b17e\") " pod="openstack/glance-default-external-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.480876 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be68c40d-a83c-40f4-ab3b-4f50f64aae15-scripts\") pod \"glance-default-internal-api-0\" (UID: \"be68c40d-a83c-40f4-ab3b-4f50f64aae15\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.480895 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c33951ff-f856-420b-90e1-6f776931b17e-scripts\") pod \"glance-default-external-api-0\" (UID: \"c33951ff-f856-420b-90e1-6f776931b17e\") " pod="openstack/glance-default-external-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.480909 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c33951ff-f856-420b-90e1-6f776931b17e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c33951ff-f856-420b-90e1-6f776931b17e\") " pod="openstack/glance-default-external-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.480932 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/be68c40d-a83c-40f4-ab3b-4f50f64aae15-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"be68c40d-a83c-40f4-ab3b-4f50f64aae15\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.480951 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hwhwp\" (UniqueName: \"kubernetes.io/projected/be68c40d-a83c-40f4-ab3b-4f50f64aae15-kube-api-access-hwhwp\") pod \"glance-default-internal-api-0\" (UID: \"be68c40d-a83c-40f4-ab3b-4f50f64aae15\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.480972 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c33951ff-f856-420b-90e1-6f776931b17e-config-data\") pod \"glance-default-external-api-0\" (UID: \"c33951ff-f856-420b-90e1-6f776931b17e\") " pod="openstack/glance-default-external-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.480994 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7b85c5d6-33f1-4b63-be75-c600247ecd3e-log-httpd\") pod \"ceilometer-0\" (UID: \"7b85c5d6-33f1-4b63-be75-c600247ecd3e\") " pod="openstack/ceilometer-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.481038 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/c33951ff-f856-420b-90e1-6f776931b17e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"c33951ff-f856-420b-90e1-6f776931b17e\") " pod="openstack/glance-default-external-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.481055 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"be68c40d-a83c-40f4-ab3b-4f50f64aae15\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.481088 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7b85c5d6-33f1-4b63-be75-c600247ecd3e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7b85c5d6-33f1-4b63-be75-c600247ecd3e\") " pod="openstack/ceilometer-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.481108 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7b85c5d6-33f1-4b63-be75-c600247ecd3e-run-httpd\") pod \"ceilometer-0\" (UID: \"7b85c5d6-33f1-4b63-be75-c600247ecd3e\") " pod="openstack/ceilometer-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.481137 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b85c5d6-33f1-4b63-be75-c600247ecd3e-scripts\") pod \"ceilometer-0\" (UID: \"7b85c5d6-33f1-4b63-be75-c600247ecd3e\") " pod="openstack/ceilometer-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.481152 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b85c5d6-33f1-4b63-be75-c600247ecd3e-config-data\") pod \"ceilometer-0\" (UID: \"7b85c5d6-33f1-4b63-be75-c600247ecd3e\") " pod="openstack/ceilometer-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.481170 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/be68c40d-a83c-40f4-ab3b-4f50f64aae15-logs\") pod \"glance-default-internal-api-0\" (UID: \"be68c40d-a83c-40f4-ab3b-4f50f64aae15\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.481620 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/be68c40d-a83c-40f4-ab3b-4f50f64aae15-logs\") pod \"glance-default-internal-api-0\" (UID: \"be68c40d-a83c-40f4-ab3b-4f50f64aae15\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.484933 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7b85c5d6-33f1-4b63-be75-c600247ecd3e-run-httpd\") pod \"ceilometer-0\" (UID: \"7b85c5d6-33f1-4b63-be75-c600247ecd3e\") " pod="openstack/ceilometer-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.485177 5048 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"be68c40d-a83c-40f4-ab3b-4f50f64aae15\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-internal-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.486403 5048 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7b85c5d6-33f1-4b63-be75-c600247ecd3e-log-httpd\") pod \"ceilometer-0\" (UID: \"7b85c5d6-33f1-4b63-be75-c600247ecd3e\") " pod="openstack/ceilometer-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.486747 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/be68c40d-a83c-40f4-ab3b-4f50f64aae15-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"be68c40d-a83c-40f4-ab3b-4f50f64aae15\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.493752 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be68c40d-a83c-40f4-ab3b-4f50f64aae15-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"be68c40d-a83c-40f4-ab3b-4f50f64aae15\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.498898 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7b85c5d6-33f1-4b63-be75-c600247ecd3e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7b85c5d6-33f1-4b63-be75-c600247ecd3e\") " pod="openstack/ceilometer-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.509336 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b85c5d6-33f1-4b63-be75-c600247ecd3e-scripts\") pod \"ceilometer-0\" (UID: \"7b85c5d6-33f1-4b63-be75-c600247ecd3e\") " pod="openstack/ceilometer-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.509607 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b85c5d6-33f1-4b63-be75-c600247ecd3e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7b85c5d6-33f1-4b63-be75-c600247ecd3e\") " pod="openstack/ceilometer-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.509871 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/be68c40d-a83c-40f4-ab3b-4f50f64aae15-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"be68c40d-a83c-40f4-ab3b-4f50f64aae15\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.513309 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be68c40d-a83c-40f4-ab3b-4f50f64aae15-scripts\") pod \"glance-default-internal-api-0\" (UID: \"be68c40d-a83c-40f4-ab3b-4f50f64aae15\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.514332 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b85c5d6-33f1-4b63-be75-c600247ecd3e-config-data\") pod \"ceilometer-0\" (UID: \"7b85c5d6-33f1-4b63-be75-c600247ecd3e\") " pod="openstack/ceilometer-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.517009 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hwhwp\" (UniqueName: \"kubernetes.io/projected/be68c40d-a83c-40f4-ab3b-4f50f64aae15-kube-api-access-hwhwp\") pod \"glance-default-internal-api-0\" (UID: \"be68c40d-a83c-40f4-ab3b-4f50f64aae15\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:51:03 crc 
kubenswrapper[5048]: I1213 06:51:03.518395 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7lbcv\" (UniqueName: \"kubernetes.io/projected/7b85c5d6-33f1-4b63-be75-c600247ecd3e-kube-api-access-7lbcv\") pod \"ceilometer-0\" (UID: \"7b85c5d6-33f1-4b63-be75-c600247ecd3e\") " pod="openstack/ceilometer-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.525611 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be68c40d-a83c-40f4-ab3b-4f50f64aae15-config-data\") pod \"glance-default-internal-api-0\" (UID: \"be68c40d-a83c-40f4-ab3b-4f50f64aae15\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.561357 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"be68c40d-a83c-40f4-ab3b-4f50f64aae15\") " pod="openstack/glance-default-internal-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.584559 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"c33951ff-f856-420b-90e1-6f776931b17e\") " pod="openstack/glance-default-external-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.584624 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t8b6z\" (UniqueName: \"kubernetes.io/projected/c33951ff-f856-420b-90e1-6f776931b17e-kube-api-access-t8b6z\") pod \"glance-default-external-api-0\" (UID: \"c33951ff-f856-420b-90e1-6f776931b17e\") " pod="openstack/glance-default-external-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.584693 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c33951ff-f856-420b-90e1-6f776931b17e-logs\") pod \"glance-default-external-api-0\" (UID: \"c33951ff-f856-420b-90e1-6f776931b17e\") " pod="openstack/glance-default-external-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.584724 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c33951ff-f856-420b-90e1-6f776931b17e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c33951ff-f856-420b-90e1-6f776931b17e\") " pod="openstack/glance-default-external-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.584756 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c33951ff-f856-420b-90e1-6f776931b17e-scripts\") pod \"glance-default-external-api-0\" (UID: \"c33951ff-f856-420b-90e1-6f776931b17e\") " pod="openstack/glance-default-external-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.584778 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c33951ff-f856-420b-90e1-6f776931b17e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c33951ff-f856-420b-90e1-6f776931b17e\") " pod="openstack/glance-default-external-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.584831 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/c33951ff-f856-420b-90e1-6f776931b17e-config-data\") pod \"glance-default-external-api-0\" (UID: \"c33951ff-f856-420b-90e1-6f776931b17e\") " pod="openstack/glance-default-external-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.584925 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c33951ff-f856-420b-90e1-6f776931b17e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"c33951ff-f856-420b-90e1-6f776931b17e\") " pod="openstack/glance-default-external-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.585500 5048 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"c33951ff-f856-420b-90e1-6f776931b17e\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-external-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.585770 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c33951ff-f856-420b-90e1-6f776931b17e-logs\") pod \"glance-default-external-api-0\" (UID: \"c33951ff-f856-420b-90e1-6f776931b17e\") " pod="openstack/glance-default-external-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.585817 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c33951ff-f856-420b-90e1-6f776931b17e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c33951ff-f856-420b-90e1-6f776931b17e\") " pod="openstack/glance-default-external-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.589035 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c33951ff-f856-420b-90e1-6f776931b17e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"c33951ff-f856-420b-90e1-6f776931b17e\") " pod="openstack/glance-default-external-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.591071 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c33951ff-f856-420b-90e1-6f776931b17e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c33951ff-f856-420b-90e1-6f776931b17e\") " pod="openstack/glance-default-external-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.602738 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.603633 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c33951ff-f856-420b-90e1-6f776931b17e-config-data\") pod \"glance-default-external-api-0\" (UID: \"c33951ff-f856-420b-90e1-6f776931b17e\") " pod="openstack/glance-default-external-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.610493 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c33951ff-f856-420b-90e1-6f776931b17e-scripts\") pod \"glance-default-external-api-0\" (UID: \"c33951ff-f856-420b-90e1-6f776931b17e\") " pod="openstack/glance-default-external-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.618935 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t8b6z\" (UniqueName: \"kubernetes.io/projected/c33951ff-f856-420b-90e1-6f776931b17e-kube-api-access-t8b6z\") pod \"glance-default-external-api-0\" (UID: \"c33951ff-f856-420b-90e1-6f776931b17e\") " pod="openstack/glance-default-external-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.628019 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"c33951ff-f856-420b-90e1-6f776931b17e\") " pod="openstack/glance-default-external-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.652962 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.653789 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-rgt9h"] Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.655088 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-rgt9h" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.658776 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-vhhj4" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.659012 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.661137 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.675943 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-rgt9h"] Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.787585 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc8db113-49b5-41af-af25-3980140fc49d-config-data\") pod \"nova-cell0-conductor-db-sync-rgt9h\" (UID: \"cc8db113-49b5-41af-af25-3980140fc49d\") " pod="openstack/nova-cell0-conductor-db-sync-rgt9h" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.787897 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc8db113-49b5-41af-af25-3980140fc49d-scripts\") pod \"nova-cell0-conductor-db-sync-rgt9h\" (UID: \"cc8db113-49b5-41af-af25-3980140fc49d\") " pod="openstack/nova-cell0-conductor-db-sync-rgt9h" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.788223 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc8db113-49b5-41af-af25-3980140fc49d-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-rgt9h\" (UID: \"cc8db113-49b5-41af-af25-3980140fc49d\") " pod="openstack/nova-cell0-conductor-db-sync-rgt9h" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.788282 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5sqqv\" (UniqueName: \"kubernetes.io/projected/cc8db113-49b5-41af-af25-3980140fc49d-kube-api-access-5sqqv\") pod \"nova-cell0-conductor-db-sync-rgt9h\" (UID: \"cc8db113-49b5-41af-af25-3980140fc49d\") " pod="openstack/nova-cell0-conductor-db-sync-rgt9h" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.794182 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.890648 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc8db113-49b5-41af-af25-3980140fc49d-config-data\") pod \"nova-cell0-conductor-db-sync-rgt9h\" (UID: \"cc8db113-49b5-41af-af25-3980140fc49d\") " pod="openstack/nova-cell0-conductor-db-sync-rgt9h" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.890729 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc8db113-49b5-41af-af25-3980140fc49d-scripts\") pod \"nova-cell0-conductor-db-sync-rgt9h\" (UID: \"cc8db113-49b5-41af-af25-3980140fc49d\") " pod="openstack/nova-cell0-conductor-db-sync-rgt9h" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.890866 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc8db113-49b5-41af-af25-3980140fc49d-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-rgt9h\" (UID: \"cc8db113-49b5-41af-af25-3980140fc49d\") " pod="openstack/nova-cell0-conductor-db-sync-rgt9h" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.890903 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5sqqv\" (UniqueName: \"kubernetes.io/projected/cc8db113-49b5-41af-af25-3980140fc49d-kube-api-access-5sqqv\") pod \"nova-cell0-conductor-db-sync-rgt9h\" (UID: \"cc8db113-49b5-41af-af25-3980140fc49d\") " pod="openstack/nova-cell0-conductor-db-sync-rgt9h" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.903992 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc8db113-49b5-41af-af25-3980140fc49d-scripts\") pod \"nova-cell0-conductor-db-sync-rgt9h\" (UID: \"cc8db113-49b5-41af-af25-3980140fc49d\") " pod="openstack/nova-cell0-conductor-db-sync-rgt9h" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.908721 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc8db113-49b5-41af-af25-3980140fc49d-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-rgt9h\" (UID: \"cc8db113-49b5-41af-af25-3980140fc49d\") " pod="openstack/nova-cell0-conductor-db-sync-rgt9h" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.915019 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5sqqv\" (UniqueName: \"kubernetes.io/projected/cc8db113-49b5-41af-af25-3980140fc49d-kube-api-access-5sqqv\") pod \"nova-cell0-conductor-db-sync-rgt9h\" (UID: \"cc8db113-49b5-41af-af25-3980140fc49d\") " pod="openstack/nova-cell0-conductor-db-sync-rgt9h" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.918996 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc8db113-49b5-41af-af25-3980140fc49d-config-data\") pod \"nova-cell0-conductor-db-sync-rgt9h\" (UID: \"cc8db113-49b5-41af-af25-3980140fc49d\") " pod="openstack/nova-cell0-conductor-db-sync-rgt9h" Dec 13 06:51:03 crc kubenswrapper[5048]: I1213 06:51:03.990540 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-rgt9h" Dec 13 06:51:04 crc kubenswrapper[5048]: I1213 06:51:04.255781 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 13 06:51:04 crc kubenswrapper[5048]: I1213 06:51:04.362977 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-rgt9h"] Dec 13 06:51:04 crc kubenswrapper[5048]: I1213 06:51:04.385780 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 13 06:51:04 crc kubenswrapper[5048]: W1213 06:51:04.519783 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc33951ff_f856_420b_90e1_6f776931b17e.slice/crio-4d7a65f32a206745f29c22f434051b7e76789a35f035c05274478e85cd792449 WatchSource:0}: Error finding container 4d7a65f32a206745f29c22f434051b7e76789a35f035c05274478e85cd792449: Status 404 returned error can't find the container with id 4d7a65f32a206745f29c22f434051b7e76789a35f035c05274478e85cd792449 Dec 13 06:51:04 crc kubenswrapper[5048]: I1213 06:51:04.521379 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 13 06:51:04 crc kubenswrapper[5048]: I1213 06:51:04.583648 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="12348a4c-feba-43e8-8f4f-bd2729ccd9ab" path="/var/lib/kubelet/pods/12348a4c-feba-43e8-8f4f-bd2729ccd9ab/volumes" Dec 13 06:51:04 crc kubenswrapper[5048]: I1213 06:51:04.585467 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ce81fe6a-857c-4bcd-ae82-b7a6f0280326" path="/var/lib/kubelet/pods/ce81fe6a-857c-4bcd-ae82-b7a6f0280326/volumes" Dec 13 06:51:04 crc kubenswrapper[5048]: I1213 06:51:04.586688 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d3148a97-28d6-4381-86c5-a8bccaaa8fc8" path="/var/lib/kubelet/pods/d3148a97-28d6-4381-86c5-a8bccaaa8fc8/volumes" Dec 13 06:51:05 crc kubenswrapper[5048]: I1213 06:51:05.212794 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"be68c40d-a83c-40f4-ab3b-4f50f64aae15","Type":"ContainerStarted","Data":"bc4afa7e78718b6e8994ba02bd39bd31756df96d484dd4869fe438169eb4bc45"} Dec 13 06:51:05 crc kubenswrapper[5048]: I1213 06:51:05.213041 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"be68c40d-a83c-40f4-ab3b-4f50f64aae15","Type":"ContainerStarted","Data":"d2e78637cb8d5fc75d3e9c3dbdbf3c8a2647b04f326bafb8f62f860bf70ef5da"} Dec 13 06:51:05 crc kubenswrapper[5048]: I1213 06:51:05.214146 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-rgt9h" event={"ID":"cc8db113-49b5-41af-af25-3980140fc49d","Type":"ContainerStarted","Data":"9bbc8005e7abee91a1eafa1a42148b77cdc96b7ebed3e004f174534a12b930ee"} Dec 13 06:51:05 crc kubenswrapper[5048]: I1213 06:51:05.219996 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c33951ff-f856-420b-90e1-6f776931b17e","Type":"ContainerStarted","Data":"06736a66bbae53eb2a72b3bcae8cef76f258a2605fa6be4959009b4ec7a33a7c"} Dec 13 06:51:05 crc kubenswrapper[5048]: I1213 06:51:05.220133 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" 
event={"ID":"c33951ff-f856-420b-90e1-6f776931b17e","Type":"ContainerStarted","Data":"4d7a65f32a206745f29c22f434051b7e76789a35f035c05274478e85cd792449"} Dec 13 06:51:05 crc kubenswrapper[5048]: I1213 06:51:05.224891 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7b85c5d6-33f1-4b63-be75-c600247ecd3e","Type":"ContainerStarted","Data":"7ccdb275d07f79f98c07e225f664c163af605efb64019e4d0f677cd77a9cc73d"} Dec 13 06:51:05 crc kubenswrapper[5048]: I1213 06:51:05.225394 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7b85c5d6-33f1-4b63-be75-c600247ecd3e","Type":"ContainerStarted","Data":"80ea0d43e0b8370c97c68e28928094e70a14a1d029817c8592d1b18b1783bba2"} Dec 13 06:51:05 crc kubenswrapper[5048]: I1213 06:51:05.961528 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 13 06:51:06 crc kubenswrapper[5048]: I1213 06:51:06.238341 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"be68c40d-a83c-40f4-ab3b-4f50f64aae15","Type":"ContainerStarted","Data":"fd136dbc3787c56bc2b713821fc45ef94e801ec54a499aaf32d72a94073eb2ef"} Dec 13 06:51:06 crc kubenswrapper[5048]: I1213 06:51:06.240832 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c33951ff-f856-420b-90e1-6f776931b17e","Type":"ContainerStarted","Data":"5473de566e43edf08a1372573a788f7b772e9bac86a137f56fefa45065d02d50"} Dec 13 06:51:06 crc kubenswrapper[5048]: I1213 06:51:06.242688 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7b85c5d6-33f1-4b63-be75-c600247ecd3e","Type":"ContainerStarted","Data":"2c9ca1044cb8b2f572f4422912fece22576a73b11efa79a795c2aa74c0bd8eca"} Dec 13 06:51:06 crc kubenswrapper[5048]: I1213 06:51:06.337210 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.337192209 podStartE2EDuration="3.337192209s" podCreationTimestamp="2025-12-13 06:51:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:51:06.282573196 +0000 UTC m=+1300.149167787" watchObservedRunningTime="2025-12-13 06:51:06.337192209 +0000 UTC m=+1300.203786790" Dec 13 06:51:06 crc kubenswrapper[5048]: I1213 06:51:06.338691 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.338684049 podStartE2EDuration="3.338684049s" podCreationTimestamp="2025-12-13 06:51:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:51:06.330890217 +0000 UTC m=+1300.197484808" watchObservedRunningTime="2025-12-13 06:51:06.338684049 +0000 UTC m=+1300.205278620" Dec 13 06:51:07 crc kubenswrapper[5048]: I1213 06:51:07.254849 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7b85c5d6-33f1-4b63-be75-c600247ecd3e","Type":"ContainerStarted","Data":"cb290fc8f264f7b8a39b0de484d6bc6d9daecdb304af6b9ba0e66debd9da7ff3"} Dec 13 06:51:09 crc kubenswrapper[5048]: I1213 06:51:09.275557 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"7b85c5d6-33f1-4b63-be75-c600247ecd3e","Type":"ContainerStarted","Data":"901606b451fa873de15243ea1efac2aa412ceb9f41f7f21bf6e278651ef90c31"} Dec 13 06:51:09 crc kubenswrapper[5048]: I1213 06:51:09.275940 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7b85c5d6-33f1-4b63-be75-c600247ecd3e" containerName="ceilometer-central-agent" containerID="cri-o://7ccdb275d07f79f98c07e225f664c163af605efb64019e4d0f677cd77a9cc73d" gracePeriod=30 Dec 13 06:51:09 crc kubenswrapper[5048]: I1213 06:51:09.276188 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 13 06:51:09 crc kubenswrapper[5048]: I1213 06:51:09.276207 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7b85c5d6-33f1-4b63-be75-c600247ecd3e" containerName="proxy-httpd" containerID="cri-o://901606b451fa873de15243ea1efac2aa412ceb9f41f7f21bf6e278651ef90c31" gracePeriod=30 Dec 13 06:51:09 crc kubenswrapper[5048]: I1213 06:51:09.276270 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7b85c5d6-33f1-4b63-be75-c600247ecd3e" containerName="sg-core" containerID="cri-o://cb290fc8f264f7b8a39b0de484d6bc6d9daecdb304af6b9ba0e66debd9da7ff3" gracePeriod=30 Dec 13 06:51:09 crc kubenswrapper[5048]: I1213 06:51:09.276320 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7b85c5d6-33f1-4b63-be75-c600247ecd3e" containerName="ceilometer-notification-agent" containerID="cri-o://2c9ca1044cb8b2f572f4422912fece22576a73b11efa79a795c2aa74c0bd8eca" gracePeriod=30 Dec 13 06:51:09 crc kubenswrapper[5048]: I1213 06:51:09.306909 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.40032867 podStartE2EDuration="6.306890093s" podCreationTimestamp="2025-12-13 06:51:03 +0000 UTC" firstStartedPulling="2025-12-13 06:51:04.278382894 +0000 UTC m=+1298.144977475" lastFinishedPulling="2025-12-13 06:51:08.184944317 +0000 UTC m=+1302.051538898" observedRunningTime="2025-12-13 06:51:09.301370932 +0000 UTC m=+1303.167965523" watchObservedRunningTime="2025-12-13 06:51:09.306890093 +0000 UTC m=+1303.173484674" Dec 13 06:51:10 crc kubenswrapper[5048]: I1213 06:51:10.295841 5048 generic.go:334] "Generic (PLEG): container finished" podID="7b85c5d6-33f1-4b63-be75-c600247ecd3e" containerID="901606b451fa873de15243ea1efac2aa412ceb9f41f7f21bf6e278651ef90c31" exitCode=0 Dec 13 06:51:10 crc kubenswrapper[5048]: I1213 06:51:10.295872 5048 generic.go:334] "Generic (PLEG): container finished" podID="7b85c5d6-33f1-4b63-be75-c600247ecd3e" containerID="cb290fc8f264f7b8a39b0de484d6bc6d9daecdb304af6b9ba0e66debd9da7ff3" exitCode=2 Dec 13 06:51:10 crc kubenswrapper[5048]: I1213 06:51:10.295881 5048 generic.go:334] "Generic (PLEG): container finished" podID="7b85c5d6-33f1-4b63-be75-c600247ecd3e" containerID="2c9ca1044cb8b2f572f4422912fece22576a73b11efa79a795c2aa74c0bd8eca" exitCode=0 Dec 13 06:51:10 crc kubenswrapper[5048]: I1213 06:51:10.295890 5048 generic.go:334] "Generic (PLEG): container finished" podID="7b85c5d6-33f1-4b63-be75-c600247ecd3e" containerID="7ccdb275d07f79f98c07e225f664c163af605efb64019e4d0f677cd77a9cc73d" exitCode=0 Dec 13 06:51:10 crc kubenswrapper[5048]: I1213 06:51:10.295910 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"7b85c5d6-33f1-4b63-be75-c600247ecd3e","Type":"ContainerDied","Data":"901606b451fa873de15243ea1efac2aa412ceb9f41f7f21bf6e278651ef90c31"} Dec 13 06:51:10 crc kubenswrapper[5048]: I1213 06:51:10.295937 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7b85c5d6-33f1-4b63-be75-c600247ecd3e","Type":"ContainerDied","Data":"cb290fc8f264f7b8a39b0de484d6bc6d9daecdb304af6b9ba0e66debd9da7ff3"} Dec 13 06:51:10 crc kubenswrapper[5048]: I1213 06:51:10.295947 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7b85c5d6-33f1-4b63-be75-c600247ecd3e","Type":"ContainerDied","Data":"2c9ca1044cb8b2f572f4422912fece22576a73b11efa79a795c2aa74c0bd8eca"} Dec 13 06:51:10 crc kubenswrapper[5048]: I1213 06:51:10.295957 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7b85c5d6-33f1-4b63-be75-c600247ecd3e","Type":"ContainerDied","Data":"7ccdb275d07f79f98c07e225f664c163af605efb64019e4d0f677cd77a9cc73d"} Dec 13 06:51:13 crc kubenswrapper[5048]: I1213 06:51:13.461567 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 13 06:51:13 crc kubenswrapper[5048]: I1213 06:51:13.583019 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7b85c5d6-33f1-4b63-be75-c600247ecd3e-run-httpd\") pod \"7b85c5d6-33f1-4b63-be75-c600247ecd3e\" (UID: \"7b85c5d6-33f1-4b63-be75-c600247ecd3e\") " Dec 13 06:51:13 crc kubenswrapper[5048]: I1213 06:51:13.583082 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b85c5d6-33f1-4b63-be75-c600247ecd3e-config-data\") pod \"7b85c5d6-33f1-4b63-be75-c600247ecd3e\" (UID: \"7b85c5d6-33f1-4b63-be75-c600247ecd3e\") " Dec 13 06:51:13 crc kubenswrapper[5048]: I1213 06:51:13.583152 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7lbcv\" (UniqueName: \"kubernetes.io/projected/7b85c5d6-33f1-4b63-be75-c600247ecd3e-kube-api-access-7lbcv\") pod \"7b85c5d6-33f1-4b63-be75-c600247ecd3e\" (UID: \"7b85c5d6-33f1-4b63-be75-c600247ecd3e\") " Dec 13 06:51:13 crc kubenswrapper[5048]: I1213 06:51:13.583185 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7b85c5d6-33f1-4b63-be75-c600247ecd3e-log-httpd\") pod \"7b85c5d6-33f1-4b63-be75-c600247ecd3e\" (UID: \"7b85c5d6-33f1-4b63-be75-c600247ecd3e\") " Dec 13 06:51:13 crc kubenswrapper[5048]: I1213 06:51:13.583259 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7b85c5d6-33f1-4b63-be75-c600247ecd3e-sg-core-conf-yaml\") pod \"7b85c5d6-33f1-4b63-be75-c600247ecd3e\" (UID: \"7b85c5d6-33f1-4b63-be75-c600247ecd3e\") " Dec 13 06:51:13 crc kubenswrapper[5048]: I1213 06:51:13.583345 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b85c5d6-33f1-4b63-be75-c600247ecd3e-combined-ca-bundle\") pod \"7b85c5d6-33f1-4b63-be75-c600247ecd3e\" (UID: \"7b85c5d6-33f1-4b63-be75-c600247ecd3e\") " Dec 13 06:51:13 crc kubenswrapper[5048]: I1213 06:51:13.583373 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/7b85c5d6-33f1-4b63-be75-c600247ecd3e-scripts\") pod \"7b85c5d6-33f1-4b63-be75-c600247ecd3e\" (UID: \"7b85c5d6-33f1-4b63-be75-c600247ecd3e\") " Dec 13 06:51:13 crc kubenswrapper[5048]: I1213 06:51:13.583652 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7b85c5d6-33f1-4b63-be75-c600247ecd3e-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "7b85c5d6-33f1-4b63-be75-c600247ecd3e" (UID: "7b85c5d6-33f1-4b63-be75-c600247ecd3e"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:51:13 crc kubenswrapper[5048]: I1213 06:51:13.583793 5048 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7b85c5d6-33f1-4b63-be75-c600247ecd3e-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:13 crc kubenswrapper[5048]: I1213 06:51:13.584307 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7b85c5d6-33f1-4b63-be75-c600247ecd3e-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "7b85c5d6-33f1-4b63-be75-c600247ecd3e" (UID: "7b85c5d6-33f1-4b63-be75-c600247ecd3e"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:51:13 crc kubenswrapper[5048]: I1213 06:51:13.589278 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b85c5d6-33f1-4b63-be75-c600247ecd3e-kube-api-access-7lbcv" (OuterVolumeSpecName: "kube-api-access-7lbcv") pod "7b85c5d6-33f1-4b63-be75-c600247ecd3e" (UID: "7b85c5d6-33f1-4b63-be75-c600247ecd3e"). InnerVolumeSpecName "kube-api-access-7lbcv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:51:13 crc kubenswrapper[5048]: I1213 06:51:13.591090 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b85c5d6-33f1-4b63-be75-c600247ecd3e-scripts" (OuterVolumeSpecName: "scripts") pod "7b85c5d6-33f1-4b63-be75-c600247ecd3e" (UID: "7b85c5d6-33f1-4b63-be75-c600247ecd3e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:51:13 crc kubenswrapper[5048]: I1213 06:51:13.603190 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Dec 13 06:51:13 crc kubenswrapper[5048]: I1213 06:51:13.603377 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Dec 13 06:51:13 crc kubenswrapper[5048]: I1213 06:51:13.623290 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b85c5d6-33f1-4b63-be75-c600247ecd3e-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "7b85c5d6-33f1-4b63-be75-c600247ecd3e" (UID: "7b85c5d6-33f1-4b63-be75-c600247ecd3e"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:51:13 crc kubenswrapper[5048]: I1213 06:51:13.638121 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Dec 13 06:51:13 crc kubenswrapper[5048]: I1213 06:51:13.651579 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Dec 13 06:51:13 crc kubenswrapper[5048]: I1213 06:51:13.691380 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7lbcv\" (UniqueName: \"kubernetes.io/projected/7b85c5d6-33f1-4b63-be75-c600247ecd3e-kube-api-access-7lbcv\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:13 crc kubenswrapper[5048]: I1213 06:51:13.691410 5048 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7b85c5d6-33f1-4b63-be75-c600247ecd3e-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:13 crc kubenswrapper[5048]: I1213 06:51:13.691422 5048 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7b85c5d6-33f1-4b63-be75-c600247ecd3e-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:13 crc kubenswrapper[5048]: I1213 06:51:13.691449 5048 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b85c5d6-33f1-4b63-be75-c600247ecd3e-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:13 crc kubenswrapper[5048]: I1213 06:51:13.693530 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b85c5d6-33f1-4b63-be75-c600247ecd3e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7b85c5d6-33f1-4b63-be75-c600247ecd3e" (UID: "7b85c5d6-33f1-4b63-be75-c600247ecd3e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:51:13 crc kubenswrapper[5048]: I1213 06:51:13.713834 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b85c5d6-33f1-4b63-be75-c600247ecd3e-config-data" (OuterVolumeSpecName: "config-data") pod "7b85c5d6-33f1-4b63-be75-c600247ecd3e" (UID: "7b85c5d6-33f1-4b63-be75-c600247ecd3e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:51:13 crc kubenswrapper[5048]: I1213 06:51:13.793006 5048 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b85c5d6-33f1-4b63-be75-c600247ecd3e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:13 crc kubenswrapper[5048]: I1213 06:51:13.793039 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b85c5d6-33f1-4b63-be75-c600247ecd3e-config-data\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:13 crc kubenswrapper[5048]: I1213 06:51:13.795387 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Dec 13 06:51:13 crc kubenswrapper[5048]: I1213 06:51:13.795502 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Dec 13 06:51:13 crc kubenswrapper[5048]: I1213 06:51:13.825256 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Dec 13 06:51:13 crc kubenswrapper[5048]: I1213 06:51:13.837448 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.364117 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-rgt9h" event={"ID":"cc8db113-49b5-41af-af25-3980140fc49d","Type":"ContainerStarted","Data":"fcdc0428ff0c65f5b4967dd59f7ba95449904abbc314e067bfbb6e57d8f923ee"} Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.367534 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7b85c5d6-33f1-4b63-be75-c600247ecd3e","Type":"ContainerDied","Data":"80ea0d43e0b8370c97c68e28928094e70a14a1d029817c8592d1b18b1783bba2"} Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.367594 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.367605 5048 scope.go:117] "RemoveContainer" containerID="901606b451fa873de15243ea1efac2aa412ceb9f41f7f21bf6e278651ef90c31" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.368087 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.368116 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.368127 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.368299 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.403477 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-rgt9h" podStartSLOduration=2.644110812 podStartE2EDuration="11.403418861s" podCreationTimestamp="2025-12-13 06:51:03 +0000 UTC" firstStartedPulling="2025-12-13 06:51:04.371265412 +0000 UTC m=+1298.237859993" lastFinishedPulling="2025-12-13 06:51:13.130573461 +0000 UTC m=+1306.997168042" observedRunningTime="2025-12-13 06:51:14.381642126 +0000 UTC m=+1308.248236707" watchObservedRunningTime="2025-12-13 06:51:14.403418861 +0000 UTC m=+1308.270013452" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.404589 5048 scope.go:117] "RemoveContainer" containerID="cb290fc8f264f7b8a39b0de484d6bc6d9daecdb304af6b9ba0e66debd9da7ff3" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.446605 5048 scope.go:117] "RemoveContainer" containerID="2c9ca1044cb8b2f572f4422912fece22576a73b11efa79a795c2aa74c0bd8eca" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.446740 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.464660 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.471935 5048 scope.go:117] "RemoveContainer" containerID="7ccdb275d07f79f98c07e225f664c163af605efb64019e4d0f677cd77a9cc73d" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.480589 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 13 06:51:14 crc kubenswrapper[5048]: E1213 06:51:14.481055 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b85c5d6-33f1-4b63-be75-c600247ecd3e" containerName="sg-core" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.481081 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b85c5d6-33f1-4b63-be75-c600247ecd3e" containerName="sg-core" Dec 13 06:51:14 crc kubenswrapper[5048]: E1213 06:51:14.481095 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b85c5d6-33f1-4b63-be75-c600247ecd3e" containerName="proxy-httpd" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.481108 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b85c5d6-33f1-4b63-be75-c600247ecd3e" containerName="proxy-httpd" Dec 13 06:51:14 crc kubenswrapper[5048]: E1213 06:51:14.481132 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b85c5d6-33f1-4b63-be75-c600247ecd3e" containerName="ceilometer-notification-agent" Dec 13 
06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.481142 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b85c5d6-33f1-4b63-be75-c600247ecd3e" containerName="ceilometer-notification-agent" Dec 13 06:51:14 crc kubenswrapper[5048]: E1213 06:51:14.481156 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b85c5d6-33f1-4b63-be75-c600247ecd3e" containerName="ceilometer-central-agent" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.481163 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b85c5d6-33f1-4b63-be75-c600247ecd3e" containerName="ceilometer-central-agent" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.481349 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b85c5d6-33f1-4b63-be75-c600247ecd3e" containerName="ceilometer-central-agent" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.481370 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b85c5d6-33f1-4b63-be75-c600247ecd3e" containerName="proxy-httpd" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.481387 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b85c5d6-33f1-4b63-be75-c600247ecd3e" containerName="sg-core" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.481407 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b85c5d6-33f1-4b63-be75-c600247ecd3e" containerName="ceilometer-notification-agent" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.483322 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.487714 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.487912 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.495146 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.505603 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5082b8c-ec48-4d9c-9204-45580d1bf111-scripts\") pod \"ceilometer-0\" (UID: \"a5082b8c-ec48-4d9c-9204-45580d1bf111\") " pod="openstack/ceilometer-0" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.505666 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a5082b8c-ec48-4d9c-9204-45580d1bf111-log-httpd\") pod \"ceilometer-0\" (UID: \"a5082b8c-ec48-4d9c-9204-45580d1bf111\") " pod="openstack/ceilometer-0" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.505712 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5082b8c-ec48-4d9c-9204-45580d1bf111-config-data\") pod \"ceilometer-0\" (UID: \"a5082b8c-ec48-4d9c-9204-45580d1bf111\") " pod="openstack/ceilometer-0" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.505789 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a5082b8c-ec48-4d9c-9204-45580d1bf111-run-httpd\") pod \"ceilometer-0\" (UID: \"a5082b8c-ec48-4d9c-9204-45580d1bf111\") " pod="openstack/ceilometer-0" Dec 13 
06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.505829 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5082b8c-ec48-4d9c-9204-45580d1bf111-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a5082b8c-ec48-4d9c-9204-45580d1bf111\") " pod="openstack/ceilometer-0" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.505882 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vjvp\" (UniqueName: \"kubernetes.io/projected/a5082b8c-ec48-4d9c-9204-45580d1bf111-kube-api-access-4vjvp\") pod \"ceilometer-0\" (UID: \"a5082b8c-ec48-4d9c-9204-45580d1bf111\") " pod="openstack/ceilometer-0" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.505947 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a5082b8c-ec48-4d9c-9204-45580d1bf111-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a5082b8c-ec48-4d9c-9204-45580d1bf111\") " pod="openstack/ceilometer-0" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.578235 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7b85c5d6-33f1-4b63-be75-c600247ecd3e" path="/var/lib/kubelet/pods/7b85c5d6-33f1-4b63-be75-c600247ecd3e/volumes" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.607650 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5082b8c-ec48-4d9c-9204-45580d1bf111-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a5082b8c-ec48-4d9c-9204-45580d1bf111\") " pod="openstack/ceilometer-0" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.607746 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vjvp\" (UniqueName: \"kubernetes.io/projected/a5082b8c-ec48-4d9c-9204-45580d1bf111-kube-api-access-4vjvp\") pod \"ceilometer-0\" (UID: \"a5082b8c-ec48-4d9c-9204-45580d1bf111\") " pod="openstack/ceilometer-0" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.607819 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a5082b8c-ec48-4d9c-9204-45580d1bf111-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a5082b8c-ec48-4d9c-9204-45580d1bf111\") " pod="openstack/ceilometer-0" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.607930 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5082b8c-ec48-4d9c-9204-45580d1bf111-scripts\") pod \"ceilometer-0\" (UID: \"a5082b8c-ec48-4d9c-9204-45580d1bf111\") " pod="openstack/ceilometer-0" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.607975 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a5082b8c-ec48-4d9c-9204-45580d1bf111-log-httpd\") pod \"ceilometer-0\" (UID: \"a5082b8c-ec48-4d9c-9204-45580d1bf111\") " pod="openstack/ceilometer-0" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.608020 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5082b8c-ec48-4d9c-9204-45580d1bf111-config-data\") pod \"ceilometer-0\" (UID: \"a5082b8c-ec48-4d9c-9204-45580d1bf111\") " pod="openstack/ceilometer-0" Dec 13 06:51:14 crc 
kubenswrapper[5048]: I1213 06:51:14.608119 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a5082b8c-ec48-4d9c-9204-45580d1bf111-run-httpd\") pod \"ceilometer-0\" (UID: \"a5082b8c-ec48-4d9c-9204-45580d1bf111\") " pod="openstack/ceilometer-0" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.608614 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a5082b8c-ec48-4d9c-9204-45580d1bf111-run-httpd\") pod \"ceilometer-0\" (UID: \"a5082b8c-ec48-4d9c-9204-45580d1bf111\") " pod="openstack/ceilometer-0" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.609326 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a5082b8c-ec48-4d9c-9204-45580d1bf111-log-httpd\") pod \"ceilometer-0\" (UID: \"a5082b8c-ec48-4d9c-9204-45580d1bf111\") " pod="openstack/ceilometer-0" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.615587 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5082b8c-ec48-4d9c-9204-45580d1bf111-config-data\") pod \"ceilometer-0\" (UID: \"a5082b8c-ec48-4d9c-9204-45580d1bf111\") " pod="openstack/ceilometer-0" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.615740 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5082b8c-ec48-4d9c-9204-45580d1bf111-scripts\") pod \"ceilometer-0\" (UID: \"a5082b8c-ec48-4d9c-9204-45580d1bf111\") " pod="openstack/ceilometer-0" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.616138 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a5082b8c-ec48-4d9c-9204-45580d1bf111-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a5082b8c-ec48-4d9c-9204-45580d1bf111\") " pod="openstack/ceilometer-0" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.617169 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5082b8c-ec48-4d9c-9204-45580d1bf111-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a5082b8c-ec48-4d9c-9204-45580d1bf111\") " pod="openstack/ceilometer-0" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.636267 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4vjvp\" (UniqueName: \"kubernetes.io/projected/a5082b8c-ec48-4d9c-9204-45580d1bf111-kube-api-access-4vjvp\") pod \"ceilometer-0\" (UID: \"a5082b8c-ec48-4d9c-9204-45580d1bf111\") " pod="openstack/ceilometer-0" Dec 13 06:51:14 crc kubenswrapper[5048]: I1213 06:51:14.807626 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 13 06:51:15 crc kubenswrapper[5048]: I1213 06:51:15.268111 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 13 06:51:15 crc kubenswrapper[5048]: I1213 06:51:15.375475 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a5082b8c-ec48-4d9c-9204-45580d1bf111","Type":"ContainerStarted","Data":"14e820a9059db3c8bde22497c6b7bca2e3ffe71ab32d5c63a20a4c96feeb8786"} Dec 13 06:51:16 crc kubenswrapper[5048]: I1213 06:51:16.385453 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a5082b8c-ec48-4d9c-9204-45580d1bf111","Type":"ContainerStarted","Data":"857455881e008335cf61e00b4a6d2dcca52900ec662d5e1b8ac9bbc18fab1720"} Dec 13 06:51:16 crc kubenswrapper[5048]: I1213 06:51:16.627618 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Dec 13 06:51:16 crc kubenswrapper[5048]: I1213 06:51:16.627719 5048 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 13 06:51:16 crc kubenswrapper[5048]: I1213 06:51:16.628697 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Dec 13 06:51:16 crc kubenswrapper[5048]: I1213 06:51:16.698698 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Dec 13 06:51:16 crc kubenswrapper[5048]: I1213 06:51:16.698832 5048 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 13 06:51:16 crc kubenswrapper[5048]: I1213 06:51:16.703386 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Dec 13 06:51:17 crc kubenswrapper[5048]: I1213 06:51:17.398256 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a5082b8c-ec48-4d9c-9204-45580d1bf111","Type":"ContainerStarted","Data":"4e917bc1a59b3471fe195615dfaee6dbd99cfc10be426d1242794a73b46f4c48"} Dec 13 06:51:18 crc kubenswrapper[5048]: I1213 06:51:18.409995 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a5082b8c-ec48-4d9c-9204-45580d1bf111","Type":"ContainerStarted","Data":"898fd60076ad5d1260695eca7b413de892b7f327bb68edecf91eb32cd012b7da"} Dec 13 06:51:20 crc kubenswrapper[5048]: I1213 06:51:20.428617 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a5082b8c-ec48-4d9c-9204-45580d1bf111","Type":"ContainerStarted","Data":"c04c2e8e9183daf195f86aad9ba8ebf7a328a5b1563e869ca69be60640c63b10"} Dec 13 06:51:20 crc kubenswrapper[5048]: I1213 06:51:20.430300 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 13 06:51:20 crc kubenswrapper[5048]: I1213 06:51:20.454233 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.064150409 podStartE2EDuration="6.454210454s" podCreationTimestamp="2025-12-13 06:51:14 +0000 UTC" firstStartedPulling="2025-12-13 06:51:15.276999991 +0000 UTC m=+1309.143594572" lastFinishedPulling="2025-12-13 06:51:19.667060026 +0000 UTC m=+1313.533654617" observedRunningTime="2025-12-13 06:51:20.449247898 +0000 UTC m=+1314.315842489" watchObservedRunningTime="2025-12-13 06:51:20.454210454 +0000 UTC m=+1314.320805035" Dec 13 06:51:29 crc kubenswrapper[5048]: I1213 06:51:29.645747 5048 
generic.go:334] "Generic (PLEG): container finished" podID="cc8db113-49b5-41af-af25-3980140fc49d" containerID="fcdc0428ff0c65f5b4967dd59f7ba95449904abbc314e067bfbb6e57d8f923ee" exitCode=0 Dec 13 06:51:29 crc kubenswrapper[5048]: I1213 06:51:29.646216 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-rgt9h" event={"ID":"cc8db113-49b5-41af-af25-3980140fc49d","Type":"ContainerDied","Data":"fcdc0428ff0c65f5b4967dd59f7ba95449904abbc314e067bfbb6e57d8f923ee"} Dec 13 06:51:31 crc kubenswrapper[5048]: I1213 06:51:31.007686 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-rgt9h" Dec 13 06:51:31 crc kubenswrapper[5048]: I1213 06:51:31.066612 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc8db113-49b5-41af-af25-3980140fc49d-config-data\") pod \"cc8db113-49b5-41af-af25-3980140fc49d\" (UID: \"cc8db113-49b5-41af-af25-3980140fc49d\") " Dec 13 06:51:31 crc kubenswrapper[5048]: I1213 06:51:31.066838 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5sqqv\" (UniqueName: \"kubernetes.io/projected/cc8db113-49b5-41af-af25-3980140fc49d-kube-api-access-5sqqv\") pod \"cc8db113-49b5-41af-af25-3980140fc49d\" (UID: \"cc8db113-49b5-41af-af25-3980140fc49d\") " Dec 13 06:51:31 crc kubenswrapper[5048]: I1213 06:51:31.066940 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc8db113-49b5-41af-af25-3980140fc49d-scripts\") pod \"cc8db113-49b5-41af-af25-3980140fc49d\" (UID: \"cc8db113-49b5-41af-af25-3980140fc49d\") " Dec 13 06:51:31 crc kubenswrapper[5048]: I1213 06:51:31.067055 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc8db113-49b5-41af-af25-3980140fc49d-combined-ca-bundle\") pod \"cc8db113-49b5-41af-af25-3980140fc49d\" (UID: \"cc8db113-49b5-41af-af25-3980140fc49d\") " Dec 13 06:51:31 crc kubenswrapper[5048]: I1213 06:51:31.074575 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc8db113-49b5-41af-af25-3980140fc49d-scripts" (OuterVolumeSpecName: "scripts") pod "cc8db113-49b5-41af-af25-3980140fc49d" (UID: "cc8db113-49b5-41af-af25-3980140fc49d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:51:31 crc kubenswrapper[5048]: I1213 06:51:31.077643 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc8db113-49b5-41af-af25-3980140fc49d-kube-api-access-5sqqv" (OuterVolumeSpecName: "kube-api-access-5sqqv") pod "cc8db113-49b5-41af-af25-3980140fc49d" (UID: "cc8db113-49b5-41af-af25-3980140fc49d"). InnerVolumeSpecName "kube-api-access-5sqqv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:51:31 crc kubenswrapper[5048]: I1213 06:51:31.110625 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc8db113-49b5-41af-af25-3980140fc49d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cc8db113-49b5-41af-af25-3980140fc49d" (UID: "cc8db113-49b5-41af-af25-3980140fc49d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:51:31 crc kubenswrapper[5048]: I1213 06:51:31.112007 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc8db113-49b5-41af-af25-3980140fc49d-config-data" (OuterVolumeSpecName: "config-data") pod "cc8db113-49b5-41af-af25-3980140fc49d" (UID: "cc8db113-49b5-41af-af25-3980140fc49d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:51:31 crc kubenswrapper[5048]: I1213 06:51:31.169747 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5sqqv\" (UniqueName: \"kubernetes.io/projected/cc8db113-49b5-41af-af25-3980140fc49d-kube-api-access-5sqqv\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:31 crc kubenswrapper[5048]: I1213 06:51:31.169785 5048 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc8db113-49b5-41af-af25-3980140fc49d-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:31 crc kubenswrapper[5048]: I1213 06:51:31.169795 5048 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc8db113-49b5-41af-af25-3980140fc49d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:31 crc kubenswrapper[5048]: I1213 06:51:31.169804 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc8db113-49b5-41af-af25-3980140fc49d-config-data\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:31 crc kubenswrapper[5048]: I1213 06:51:31.668966 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-rgt9h" event={"ID":"cc8db113-49b5-41af-af25-3980140fc49d","Type":"ContainerDied","Data":"9bbc8005e7abee91a1eafa1a42148b77cdc96b7ebed3e004f174534a12b930ee"} Dec 13 06:51:31 crc kubenswrapper[5048]: I1213 06:51:31.669017 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9bbc8005e7abee91a1eafa1a42148b77cdc96b7ebed3e004f174534a12b930ee" Dec 13 06:51:31 crc kubenswrapper[5048]: I1213 06:51:31.669306 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-rgt9h" Dec 13 06:51:31 crc kubenswrapper[5048]: I1213 06:51:31.752692 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Dec 13 06:51:31 crc kubenswrapper[5048]: E1213 06:51:31.753054 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc8db113-49b5-41af-af25-3980140fc49d" containerName="nova-cell0-conductor-db-sync" Dec 13 06:51:31 crc kubenswrapper[5048]: I1213 06:51:31.753070 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc8db113-49b5-41af-af25-3980140fc49d" containerName="nova-cell0-conductor-db-sync" Dec 13 06:51:31 crc kubenswrapper[5048]: I1213 06:51:31.753236 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc8db113-49b5-41af-af25-3980140fc49d" containerName="nova-cell0-conductor-db-sync" Dec 13 06:51:31 crc kubenswrapper[5048]: I1213 06:51:31.753809 5048 util.go:30] "No sandbox for pod can be found. 
Dec 13 06:51:31 crc kubenswrapper[5048]: I1213 06:51:31.753809 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Dec 13 06:51:31 crc kubenswrapper[5048]: I1213 06:51:31.756157 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Dec 13 06:51:31 crc kubenswrapper[5048]: I1213 06:51:31.756351 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-vhhj4"
Dec 13 06:51:31 crc kubenswrapper[5048]: I1213 06:51:31.762323 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Dec 13 06:51:31 crc kubenswrapper[5048]: I1213 06:51:31.788512 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e4cb8ce-de40-4d30-beb4-fa5a0dae4e1e-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"1e4cb8ce-de40-4d30-beb4-fa5a0dae4e1e\") " pod="openstack/nova-cell0-conductor-0"
Dec 13 06:51:31 crc kubenswrapper[5048]: I1213 06:51:31.788967 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zgxjg\" (UniqueName: \"kubernetes.io/projected/1e4cb8ce-de40-4d30-beb4-fa5a0dae4e1e-kube-api-access-zgxjg\") pod \"nova-cell0-conductor-0\" (UID: \"1e4cb8ce-de40-4d30-beb4-fa5a0dae4e1e\") " pod="openstack/nova-cell0-conductor-0"
Dec 13 06:51:31 crc kubenswrapper[5048]: I1213 06:51:31.789005 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e4cb8ce-de40-4d30-beb4-fa5a0dae4e1e-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"1e4cb8ce-de40-4d30-beb4-fa5a0dae4e1e\") " pod="openstack/nova-cell0-conductor-0"
Dec 13 06:51:31 crc kubenswrapper[5048]: I1213 06:51:31.891138 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e4cb8ce-de40-4d30-beb4-fa5a0dae4e1e-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"1e4cb8ce-de40-4d30-beb4-fa5a0dae4e1e\") " pod="openstack/nova-cell0-conductor-0"
Dec 13 06:51:31 crc kubenswrapper[5048]: I1213 06:51:31.891295 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zgxjg\" (UniqueName: \"kubernetes.io/projected/1e4cb8ce-de40-4d30-beb4-fa5a0dae4e1e-kube-api-access-zgxjg\") pod \"nova-cell0-conductor-0\" (UID: \"1e4cb8ce-de40-4d30-beb4-fa5a0dae4e1e\") " pod="openstack/nova-cell0-conductor-0"
Dec 13 06:51:31 crc kubenswrapper[5048]: I1213 06:51:31.891339 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e4cb8ce-de40-4d30-beb4-fa5a0dae4e1e-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"1e4cb8ce-de40-4d30-beb4-fa5a0dae4e1e\") " pod="openstack/nova-cell0-conductor-0"
Dec 13 06:51:31 crc kubenswrapper[5048]: I1213 06:51:31.896781 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e4cb8ce-de40-4d30-beb4-fa5a0dae4e1e-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"1e4cb8ce-de40-4d30-beb4-fa5a0dae4e1e\") " pod="openstack/nova-cell0-conductor-0"
Dec 13 06:51:31 crc kubenswrapper[5048]: I1213 06:51:31.900638 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e4cb8ce-de40-4d30-beb4-fa5a0dae4e1e-config-data\") pod \"nova-cell0-conductor-0\"
(UID: \"1e4cb8ce-de40-4d30-beb4-fa5a0dae4e1e\") " pod="openstack/nova-cell0-conductor-0" Dec 13 06:51:31 crc kubenswrapper[5048]: I1213 06:51:31.923817 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zgxjg\" (UniqueName: \"kubernetes.io/projected/1e4cb8ce-de40-4d30-beb4-fa5a0dae4e1e-kube-api-access-zgxjg\") pod \"nova-cell0-conductor-0\" (UID: \"1e4cb8ce-de40-4d30-beb4-fa5a0dae4e1e\") " pod="openstack/nova-cell0-conductor-0" Dec 13 06:51:32 crc kubenswrapper[5048]: I1213 06:51:32.068904 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Dec 13 06:51:32 crc kubenswrapper[5048]: I1213 06:51:32.529110 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Dec 13 06:51:32 crc kubenswrapper[5048]: W1213 06:51:32.533762 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1e4cb8ce_de40_4d30_beb4_fa5a0dae4e1e.slice/crio-f205258d6ad96ae16f3857ac7a46f77510cd2178c3a400c9b47f0d650f95f2bc WatchSource:0}: Error finding container f205258d6ad96ae16f3857ac7a46f77510cd2178c3a400c9b47f0d650f95f2bc: Status 404 returned error can't find the container with id f205258d6ad96ae16f3857ac7a46f77510cd2178c3a400c9b47f0d650f95f2bc Dec 13 06:51:32 crc kubenswrapper[5048]: I1213 06:51:32.675687 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"1e4cb8ce-de40-4d30-beb4-fa5a0dae4e1e","Type":"ContainerStarted","Data":"f205258d6ad96ae16f3857ac7a46f77510cd2178c3a400c9b47f0d650f95f2bc"} Dec 13 06:51:33 crc kubenswrapper[5048]: I1213 06:51:33.683816 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"1e4cb8ce-de40-4d30-beb4-fa5a0dae4e1e","Type":"ContainerStarted","Data":"3a9493e48e56eed3a70e8971399f27e4e21e7b1309a02e2e953146f673c73aac"} Dec 13 06:51:33 crc kubenswrapper[5048]: I1213 06:51:33.685665 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Dec 13 06:51:33 crc kubenswrapper[5048]: I1213 06:51:33.711412 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.711396253 podStartE2EDuration="2.711396253s" podCreationTimestamp="2025-12-13 06:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:51:33.700775333 +0000 UTC m=+1327.567369924" watchObservedRunningTime="2025-12-13 06:51:33.711396253 +0000 UTC m=+1327.577990834" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.094183 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.533991 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-npmvk"] Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.535337 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-npmvk" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.538287 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.538359 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.546259 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-npmvk"] Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.595294 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b3c1380-a6bf-4bf2-b648-1e548fb07c12-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-npmvk\" (UID: \"8b3c1380-a6bf-4bf2-b648-1e548fb07c12\") " pod="openstack/nova-cell0-cell-mapping-npmvk" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.595423 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hqztk\" (UniqueName: \"kubernetes.io/projected/8b3c1380-a6bf-4bf2-b648-1e548fb07c12-kube-api-access-hqztk\") pod \"nova-cell0-cell-mapping-npmvk\" (UID: \"8b3c1380-a6bf-4bf2-b648-1e548fb07c12\") " pod="openstack/nova-cell0-cell-mapping-npmvk" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.595579 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8b3c1380-a6bf-4bf2-b648-1e548fb07c12-scripts\") pod \"nova-cell0-cell-mapping-npmvk\" (UID: \"8b3c1380-a6bf-4bf2-b648-1e548fb07c12\") " pod="openstack/nova-cell0-cell-mapping-npmvk" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.595691 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b3c1380-a6bf-4bf2-b648-1e548fb07c12-config-data\") pod \"nova-cell0-cell-mapping-npmvk\" (UID: \"8b3c1380-a6bf-4bf2-b648-1e548fb07c12\") " pod="openstack/nova-cell0-cell-mapping-npmvk" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.689292 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.690694 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.693093 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.697151 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hqztk\" (UniqueName: \"kubernetes.io/projected/8b3c1380-a6bf-4bf2-b648-1e548fb07c12-kube-api-access-hqztk\") pod \"nova-cell0-cell-mapping-npmvk\" (UID: \"8b3c1380-a6bf-4bf2-b648-1e548fb07c12\") " pod="openstack/nova-cell0-cell-mapping-npmvk" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.697215 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8b3c1380-a6bf-4bf2-b648-1e548fb07c12-scripts\") pod \"nova-cell0-cell-mapping-npmvk\" (UID: \"8b3c1380-a6bf-4bf2-b648-1e548fb07c12\") " pod="openstack/nova-cell0-cell-mapping-npmvk" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.697258 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b3c1380-a6bf-4bf2-b648-1e548fb07c12-config-data\") pod \"nova-cell0-cell-mapping-npmvk\" (UID: \"8b3c1380-a6bf-4bf2-b648-1e548fb07c12\") " pod="openstack/nova-cell0-cell-mapping-npmvk" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.697408 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b3c1380-a6bf-4bf2-b648-1e548fb07c12-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-npmvk\" (UID: \"8b3c1380-a6bf-4bf2-b648-1e548fb07c12\") " pod="openstack/nova-cell0-cell-mapping-npmvk" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.702969 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b3c1380-a6bf-4bf2-b648-1e548fb07c12-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-npmvk\" (UID: \"8b3c1380-a6bf-4bf2-b648-1e548fb07c12\") " pod="openstack/nova-cell0-cell-mapping-npmvk" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.708032 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8b3c1380-a6bf-4bf2-b648-1e548fb07c12-scripts\") pod \"nova-cell0-cell-mapping-npmvk\" (UID: \"8b3c1380-a6bf-4bf2-b648-1e548fb07c12\") " pod="openstack/nova-cell0-cell-mapping-npmvk" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.717763 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.726472 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b3c1380-a6bf-4bf2-b648-1e548fb07c12-config-data\") pod \"nova-cell0-cell-mapping-npmvk\" (UID: \"8b3c1380-a6bf-4bf2-b648-1e548fb07c12\") " pod="openstack/nova-cell0-cell-mapping-npmvk" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.750192 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hqztk\" (UniqueName: \"kubernetes.io/projected/8b3c1380-a6bf-4bf2-b648-1e548fb07c12-kube-api-access-hqztk\") pod \"nova-cell0-cell-mapping-npmvk\" (UID: \"8b3c1380-a6bf-4bf2-b648-1e548fb07c12\") " pod="openstack/nova-cell0-cell-mapping-npmvk" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.771156 5048 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.772956 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.777774 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.796869 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.801502 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/233040b0-ce2e-4fe7-8863-9093c75e3e7a-config-data\") pod \"nova-metadata-0\" (UID: \"233040b0-ce2e-4fe7-8863-9093c75e3e7a\") " pod="openstack/nova-metadata-0" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.801558 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rcdll\" (UniqueName: \"kubernetes.io/projected/233040b0-ce2e-4fe7-8863-9093c75e3e7a-kube-api-access-rcdll\") pod \"nova-metadata-0\" (UID: \"233040b0-ce2e-4fe7-8863-9093c75e3e7a\") " pod="openstack/nova-metadata-0" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.801590 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/233040b0-ce2e-4fe7-8863-9093c75e3e7a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"233040b0-ce2e-4fe7-8863-9093c75e3e7a\") " pod="openstack/nova-metadata-0" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.801613 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f1dc8fe-cba2-4a26-9d79-42f3be73edb3-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"5f1dc8fe-cba2-4a26-9d79-42f3be73edb3\") " pod="openstack/nova-cell1-novncproxy-0" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.801711 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hq49\" (UniqueName: \"kubernetes.io/projected/5f1dc8fe-cba2-4a26-9d79-42f3be73edb3-kube-api-access-5hq49\") pod \"nova-cell1-novncproxy-0\" (UID: \"5f1dc8fe-cba2-4a26-9d79-42f3be73edb3\") " pod="openstack/nova-cell1-novncproxy-0" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.801737 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/233040b0-ce2e-4fe7-8863-9093c75e3e7a-logs\") pod \"nova-metadata-0\" (UID: \"233040b0-ce2e-4fe7-8863-9093c75e3e7a\") " pod="openstack/nova-metadata-0" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.801756 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f1dc8fe-cba2-4a26-9d79-42f3be73edb3-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"5f1dc8fe-cba2-4a26-9d79-42f3be73edb3\") " pod="openstack/nova-cell1-novncproxy-0" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.821181 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.823565 5048 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.835814 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.860117 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.868362 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-npmvk" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.917669 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jhkpq\" (UniqueName: \"kubernetes.io/projected/3a656ea3-a5ba-4fbc-ba4b-4c220207e764-kube-api-access-jhkpq\") pod \"nova-scheduler-0\" (UID: \"3a656ea3-a5ba-4fbc-ba4b-4c220207e764\") " pod="openstack/nova-scheduler-0" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.917743 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/233040b0-ce2e-4fe7-8863-9093c75e3e7a-config-data\") pod \"nova-metadata-0\" (UID: \"233040b0-ce2e-4fe7-8863-9093c75e3e7a\") " pod="openstack/nova-metadata-0" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.917772 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rcdll\" (UniqueName: \"kubernetes.io/projected/233040b0-ce2e-4fe7-8863-9093c75e3e7a-kube-api-access-rcdll\") pod \"nova-metadata-0\" (UID: \"233040b0-ce2e-4fe7-8863-9093c75e3e7a\") " pod="openstack/nova-metadata-0" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.917812 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/233040b0-ce2e-4fe7-8863-9093c75e3e7a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"233040b0-ce2e-4fe7-8863-9093c75e3e7a\") " pod="openstack/nova-metadata-0" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.917836 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a656ea3-a5ba-4fbc-ba4b-4c220207e764-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3a656ea3-a5ba-4fbc-ba4b-4c220207e764\") " pod="openstack/nova-scheduler-0" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.917869 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f1dc8fe-cba2-4a26-9d79-42f3be73edb3-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"5f1dc8fe-cba2-4a26-9d79-42f3be73edb3\") " pod="openstack/nova-cell1-novncproxy-0" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.917905 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a656ea3-a5ba-4fbc-ba4b-4c220207e764-config-data\") pod \"nova-scheduler-0\" (UID: \"3a656ea3-a5ba-4fbc-ba4b-4c220207e764\") " pod="openstack/nova-scheduler-0" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.918094 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5hq49\" (UniqueName: \"kubernetes.io/projected/5f1dc8fe-cba2-4a26-9d79-42f3be73edb3-kube-api-access-5hq49\") pod \"nova-cell1-novncproxy-0\" (UID: \"5f1dc8fe-cba2-4a26-9d79-42f3be73edb3\") 
" pod="openstack/nova-cell1-novncproxy-0" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.918151 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/233040b0-ce2e-4fe7-8863-9093c75e3e7a-logs\") pod \"nova-metadata-0\" (UID: \"233040b0-ce2e-4fe7-8863-9093c75e3e7a\") " pod="openstack/nova-metadata-0" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.918175 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f1dc8fe-cba2-4a26-9d79-42f3be73edb3-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"5f1dc8fe-cba2-4a26-9d79-42f3be73edb3\") " pod="openstack/nova-cell1-novncproxy-0" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.924511 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f1dc8fe-cba2-4a26-9d79-42f3be73edb3-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"5f1dc8fe-cba2-4a26-9d79-42f3be73edb3\") " pod="openstack/nova-cell1-novncproxy-0" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.925112 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/233040b0-ce2e-4fe7-8863-9093c75e3e7a-logs\") pod \"nova-metadata-0\" (UID: \"233040b0-ce2e-4fe7-8863-9093c75e3e7a\") " pod="openstack/nova-metadata-0" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.929375 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/233040b0-ce2e-4fe7-8863-9093c75e3e7a-config-data\") pod \"nova-metadata-0\" (UID: \"233040b0-ce2e-4fe7-8863-9093c75e3e7a\") " pod="openstack/nova-metadata-0" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.929921 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f1dc8fe-cba2-4a26-9d79-42f3be73edb3-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"5f1dc8fe-cba2-4a26-9d79-42f3be73edb3\") " pod="openstack/nova-cell1-novncproxy-0" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.941115 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/233040b0-ce2e-4fe7-8863-9093c75e3e7a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"233040b0-ce2e-4fe7-8863-9093c75e3e7a\") " pod="openstack/nova-metadata-0" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.949782 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rcdll\" (UniqueName: \"kubernetes.io/projected/233040b0-ce2e-4fe7-8863-9093c75e3e7a-kube-api-access-rcdll\") pod \"nova-metadata-0\" (UID: \"233040b0-ce2e-4fe7-8863-9093c75e3e7a\") " pod="openstack/nova-metadata-0" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.949879 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.964898 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.970282 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.970750 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.975927 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-mqjdq"] Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.977289 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-mqjdq" Dec 13 06:51:37 crc kubenswrapper[5048]: I1213 06:51:37.982188 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hq49\" (UniqueName: \"kubernetes.io/projected/5f1dc8fe-cba2-4a26-9d79-42f3be73edb3-kube-api-access-5hq49\") pod \"nova-cell1-novncproxy-0\" (UID: \"5f1dc8fe-cba2-4a26-9d79-42f3be73edb3\") " pod="openstack/nova-cell1-novncproxy-0" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.023295 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-mqjdq"] Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.024242 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wvlvl\" (UniqueName: \"kubernetes.io/projected/4a966382-7a0b-439d-a1f0-9a08ae863aa0-kube-api-access-wvlvl\") pod \"dnsmasq-dns-bccf8f775-mqjdq\" (UID: \"4a966382-7a0b-439d-a1f0-9a08ae863aa0\") " pod="openstack/dnsmasq-dns-bccf8f775-mqjdq" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.024316 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4a966382-7a0b-439d-a1f0-9a08ae863aa0-ovsdbserver-sb\") pod \"dnsmasq-dns-bccf8f775-mqjdq\" (UID: \"4a966382-7a0b-439d-a1f0-9a08ae863aa0\") " pod="openstack/dnsmasq-dns-bccf8f775-mqjdq" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.024345 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4a966382-7a0b-439d-a1f0-9a08ae863aa0-config\") pod \"dnsmasq-dns-bccf8f775-mqjdq\" (UID: \"4a966382-7a0b-439d-a1f0-9a08ae863aa0\") " pod="openstack/dnsmasq-dns-bccf8f775-mqjdq" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.024363 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9360f872-f320-4ae7-a11c-5d859e76db22-config-data\") pod \"nova-api-0\" (UID: \"9360f872-f320-4ae7-a11c-5d859e76db22\") " pod="openstack/nova-api-0" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.024388 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jhkpq\" (UniqueName: \"kubernetes.io/projected/3a656ea3-a5ba-4fbc-ba4b-4c220207e764-kube-api-access-jhkpq\") pod \"nova-scheduler-0\" (UID: \"3a656ea3-a5ba-4fbc-ba4b-4c220207e764\") " pod="openstack/nova-scheduler-0" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.024420 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mdgbh\" (UniqueName: \"kubernetes.io/projected/9360f872-f320-4ae7-a11c-5d859e76db22-kube-api-access-mdgbh\") pod \"nova-api-0\" (UID: 
\"9360f872-f320-4ae7-a11c-5d859e76db22\") " pod="openstack/nova-api-0" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.024453 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a656ea3-a5ba-4fbc-ba4b-4c220207e764-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3a656ea3-a5ba-4fbc-ba4b-4c220207e764\") " pod="openstack/nova-scheduler-0" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.024478 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4a966382-7a0b-439d-a1f0-9a08ae863aa0-ovsdbserver-nb\") pod \"dnsmasq-dns-bccf8f775-mqjdq\" (UID: \"4a966382-7a0b-439d-a1f0-9a08ae863aa0\") " pod="openstack/dnsmasq-dns-bccf8f775-mqjdq" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.024493 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a656ea3-a5ba-4fbc-ba4b-4c220207e764-config-data\") pod \"nova-scheduler-0\" (UID: \"3a656ea3-a5ba-4fbc-ba4b-4c220207e764\") " pod="openstack/nova-scheduler-0" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.025410 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4a966382-7a0b-439d-a1f0-9a08ae863aa0-dns-swift-storage-0\") pod \"dnsmasq-dns-bccf8f775-mqjdq\" (UID: \"4a966382-7a0b-439d-a1f0-9a08ae863aa0\") " pod="openstack/dnsmasq-dns-bccf8f775-mqjdq" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.025467 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9360f872-f320-4ae7-a11c-5d859e76db22-logs\") pod \"nova-api-0\" (UID: \"9360f872-f320-4ae7-a11c-5d859e76db22\") " pod="openstack/nova-api-0" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.025505 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4a966382-7a0b-439d-a1f0-9a08ae863aa0-dns-svc\") pod \"dnsmasq-dns-bccf8f775-mqjdq\" (UID: \"4a966382-7a0b-439d-a1f0-9a08ae863aa0\") " pod="openstack/dnsmasq-dns-bccf8f775-mqjdq" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.025552 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9360f872-f320-4ae7-a11c-5d859e76db22-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"9360f872-f320-4ae7-a11c-5d859e76db22\") " pod="openstack/nova-api-0" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.031179 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a656ea3-a5ba-4fbc-ba4b-4c220207e764-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3a656ea3-a5ba-4fbc-ba4b-4c220207e764\") " pod="openstack/nova-scheduler-0" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.031859 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a656ea3-a5ba-4fbc-ba4b-4c220207e764-config-data\") pod \"nova-scheduler-0\" (UID: \"3a656ea3-a5ba-4fbc-ba4b-4c220207e764\") " pod="openstack/nova-scheduler-0" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.050242 5048 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-jhkpq\" (UniqueName: \"kubernetes.io/projected/3a656ea3-a5ba-4fbc-ba4b-4c220207e764-kube-api-access-jhkpq\") pod \"nova-scheduler-0\" (UID: \"3a656ea3-a5ba-4fbc-ba4b-4c220207e764\") " pod="openstack/nova-scheduler-0" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.127219 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4a966382-7a0b-439d-a1f0-9a08ae863aa0-ovsdbserver-sb\") pod \"dnsmasq-dns-bccf8f775-mqjdq\" (UID: \"4a966382-7a0b-439d-a1f0-9a08ae863aa0\") " pod="openstack/dnsmasq-dns-bccf8f775-mqjdq" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.127266 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4a966382-7a0b-439d-a1f0-9a08ae863aa0-config\") pod \"dnsmasq-dns-bccf8f775-mqjdq\" (UID: \"4a966382-7a0b-439d-a1f0-9a08ae863aa0\") " pod="openstack/dnsmasq-dns-bccf8f775-mqjdq" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.127286 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9360f872-f320-4ae7-a11c-5d859e76db22-config-data\") pod \"nova-api-0\" (UID: \"9360f872-f320-4ae7-a11c-5d859e76db22\") " pod="openstack/nova-api-0" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.127324 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mdgbh\" (UniqueName: \"kubernetes.io/projected/9360f872-f320-4ae7-a11c-5d859e76db22-kube-api-access-mdgbh\") pod \"nova-api-0\" (UID: \"9360f872-f320-4ae7-a11c-5d859e76db22\") " pod="openstack/nova-api-0" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.127352 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4a966382-7a0b-439d-a1f0-9a08ae863aa0-ovsdbserver-nb\") pod \"dnsmasq-dns-bccf8f775-mqjdq\" (UID: \"4a966382-7a0b-439d-a1f0-9a08ae863aa0\") " pod="openstack/dnsmasq-dns-bccf8f775-mqjdq" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.127378 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4a966382-7a0b-439d-a1f0-9a08ae863aa0-dns-swift-storage-0\") pod \"dnsmasq-dns-bccf8f775-mqjdq\" (UID: \"4a966382-7a0b-439d-a1f0-9a08ae863aa0\") " pod="openstack/dnsmasq-dns-bccf8f775-mqjdq" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.127399 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9360f872-f320-4ae7-a11c-5d859e76db22-logs\") pod \"nova-api-0\" (UID: \"9360f872-f320-4ae7-a11c-5d859e76db22\") " pod="openstack/nova-api-0" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.127432 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4a966382-7a0b-439d-a1f0-9a08ae863aa0-dns-svc\") pod \"dnsmasq-dns-bccf8f775-mqjdq\" (UID: \"4a966382-7a0b-439d-a1f0-9a08ae863aa0\") " pod="openstack/dnsmasq-dns-bccf8f775-mqjdq" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.127480 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9360f872-f320-4ae7-a11c-5d859e76db22-combined-ca-bundle\") pod \"nova-api-0\" (UID: 
\"9360f872-f320-4ae7-a11c-5d859e76db22\") " pod="openstack/nova-api-0" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.127505 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wvlvl\" (UniqueName: \"kubernetes.io/projected/4a966382-7a0b-439d-a1f0-9a08ae863aa0-kube-api-access-wvlvl\") pod \"dnsmasq-dns-bccf8f775-mqjdq\" (UID: \"4a966382-7a0b-439d-a1f0-9a08ae863aa0\") " pod="openstack/dnsmasq-dns-bccf8f775-mqjdq" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.128887 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4a966382-7a0b-439d-a1f0-9a08ae863aa0-config\") pod \"dnsmasq-dns-bccf8f775-mqjdq\" (UID: \"4a966382-7a0b-439d-a1f0-9a08ae863aa0\") " pod="openstack/dnsmasq-dns-bccf8f775-mqjdq" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.128907 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4a966382-7a0b-439d-a1f0-9a08ae863aa0-ovsdbserver-nb\") pod \"dnsmasq-dns-bccf8f775-mqjdq\" (UID: \"4a966382-7a0b-439d-a1f0-9a08ae863aa0\") " pod="openstack/dnsmasq-dns-bccf8f775-mqjdq" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.128964 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9360f872-f320-4ae7-a11c-5d859e76db22-logs\") pod \"nova-api-0\" (UID: \"9360f872-f320-4ae7-a11c-5d859e76db22\") " pod="openstack/nova-api-0" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.129341 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4a966382-7a0b-439d-a1f0-9a08ae863aa0-ovsdbserver-sb\") pod \"dnsmasq-dns-bccf8f775-mqjdq\" (UID: \"4a966382-7a0b-439d-a1f0-9a08ae863aa0\") " pod="openstack/dnsmasq-dns-bccf8f775-mqjdq" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.143146 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4a966382-7a0b-439d-a1f0-9a08ae863aa0-dns-swift-storage-0\") pod \"dnsmasq-dns-bccf8f775-mqjdq\" (UID: \"4a966382-7a0b-439d-a1f0-9a08ae863aa0\") " pod="openstack/dnsmasq-dns-bccf8f775-mqjdq" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.144777 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4a966382-7a0b-439d-a1f0-9a08ae863aa0-dns-svc\") pod \"dnsmasq-dns-bccf8f775-mqjdq\" (UID: \"4a966382-7a0b-439d-a1f0-9a08ae863aa0\") " pod="openstack/dnsmasq-dns-bccf8f775-mqjdq" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.144827 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.155203 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9360f872-f320-4ae7-a11c-5d859e76db22-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"9360f872-f320-4ae7-a11c-5d859e76db22\") " pod="openstack/nova-api-0" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.155800 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9360f872-f320-4ae7-a11c-5d859e76db22-config-data\") pod \"nova-api-0\" (UID: \"9360f872-f320-4ae7-a11c-5d859e76db22\") " pod="openstack/nova-api-0" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.176357 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.177129 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mdgbh\" (UniqueName: \"kubernetes.io/projected/9360f872-f320-4ae7-a11c-5d859e76db22-kube-api-access-mdgbh\") pod \"nova-api-0\" (UID: \"9360f872-f320-4ae7-a11c-5d859e76db22\") " pod="openstack/nova-api-0" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.184246 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.209373 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wvlvl\" (UniqueName: \"kubernetes.io/projected/4a966382-7a0b-439d-a1f0-9a08ae863aa0-kube-api-access-wvlvl\") pod \"dnsmasq-dns-bccf8f775-mqjdq\" (UID: \"4a966382-7a0b-439d-a1f0-9a08ae863aa0\") " pod="openstack/dnsmasq-dns-bccf8f775-mqjdq" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.377926 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.394713 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-mqjdq" Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.620867 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-npmvk"] Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.747683 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-npmvk" event={"ID":"8b3c1380-a6bf-4bf2-b648-1e548fb07c12","Type":"ContainerStarted","Data":"a88ed9619b2a5bda5a3d73d49441f19eaa89c5ba42800c29d01f1d4038409f25"} Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.797889 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.808325 5048 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.888709 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 13 06:51:38 crc kubenswrapper[5048]: I1213 06:51:38.920777 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 13 06:51:39 crc kubenswrapper[5048]: I1213 06:51:39.022782 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-v2pzq"] Dec 13 06:51:39 crc kubenswrapper[5048]: I1213 06:51:39.024153 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-v2pzq" Dec 13 06:51:39 crc kubenswrapper[5048]: I1213 06:51:39.027695 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Dec 13 06:51:39 crc kubenswrapper[5048]: I1213 06:51:39.027705 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Dec 13 06:51:39 crc kubenswrapper[5048]: I1213 06:51:39.030788 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-v2pzq"] Dec 13 06:51:39 crc kubenswrapper[5048]: I1213 06:51:39.058024 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d733f09-6f54-4b02-b44e-81bc02321044-scripts\") pod \"nova-cell1-conductor-db-sync-v2pzq\" (UID: \"6d733f09-6f54-4b02-b44e-81bc02321044\") " pod="openstack/nova-cell1-conductor-db-sync-v2pzq" Dec 13 06:51:39 crc kubenswrapper[5048]: I1213 06:51:39.058092 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x9lsp\" (UniqueName: \"kubernetes.io/projected/6d733f09-6f54-4b02-b44e-81bc02321044-kube-api-access-x9lsp\") pod \"nova-cell1-conductor-db-sync-v2pzq\" (UID: \"6d733f09-6f54-4b02-b44e-81bc02321044\") " pod="openstack/nova-cell1-conductor-db-sync-v2pzq" Dec 13 06:51:39 crc kubenswrapper[5048]: I1213 06:51:39.058116 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d733f09-6f54-4b02-b44e-81bc02321044-config-data\") pod \"nova-cell1-conductor-db-sync-v2pzq\" (UID: \"6d733f09-6f54-4b02-b44e-81bc02321044\") " pod="openstack/nova-cell1-conductor-db-sync-v2pzq" Dec 13 06:51:39 crc kubenswrapper[5048]: I1213 06:51:39.058200 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/6d733f09-6f54-4b02-b44e-81bc02321044-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-v2pzq\" (UID: \"6d733f09-6f54-4b02-b44e-81bc02321044\") " pod="openstack/nova-cell1-conductor-db-sync-v2pzq" Dec 13 06:51:39 crc kubenswrapper[5048]: W1213 06:51:39.076813 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9360f872_f320_4ae7_a11c_5d859e76db22.slice/crio-d95ab645d8e41cdb0192433b15ee10c44edcd69496486ece92981107ac7f180f WatchSource:0}: Error finding container d95ab645d8e41cdb0192433b15ee10c44edcd69496486ece92981107ac7f180f: Status 404 returned error can't find the container with id d95ab645d8e41cdb0192433b15ee10c44edcd69496486ece92981107ac7f180f Dec 13 06:51:39 crc kubenswrapper[5048]: I1213 06:51:39.085329 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 13 06:51:39 crc kubenswrapper[5048]: I1213 06:51:39.094045 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-mqjdq"] Dec 13 06:51:39 crc kubenswrapper[5048]: I1213 06:51:39.159981 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d733f09-6f54-4b02-b44e-81bc02321044-scripts\") pod \"nova-cell1-conductor-db-sync-v2pzq\" (UID: \"6d733f09-6f54-4b02-b44e-81bc02321044\") " pod="openstack/nova-cell1-conductor-db-sync-v2pzq" Dec 13 06:51:39 crc kubenswrapper[5048]: I1213 06:51:39.160061 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x9lsp\" (UniqueName: \"kubernetes.io/projected/6d733f09-6f54-4b02-b44e-81bc02321044-kube-api-access-x9lsp\") pod \"nova-cell1-conductor-db-sync-v2pzq\" (UID: \"6d733f09-6f54-4b02-b44e-81bc02321044\") " pod="openstack/nova-cell1-conductor-db-sync-v2pzq" Dec 13 06:51:39 crc kubenswrapper[5048]: I1213 06:51:39.160090 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d733f09-6f54-4b02-b44e-81bc02321044-config-data\") pod \"nova-cell1-conductor-db-sync-v2pzq\" (UID: \"6d733f09-6f54-4b02-b44e-81bc02321044\") " pod="openstack/nova-cell1-conductor-db-sync-v2pzq" Dec 13 06:51:39 crc kubenswrapper[5048]: I1213 06:51:39.160181 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d733f09-6f54-4b02-b44e-81bc02321044-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-v2pzq\" (UID: \"6d733f09-6f54-4b02-b44e-81bc02321044\") " pod="openstack/nova-cell1-conductor-db-sync-v2pzq" Dec 13 06:51:39 crc kubenswrapper[5048]: I1213 06:51:39.163873 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d733f09-6f54-4b02-b44e-81bc02321044-scripts\") pod \"nova-cell1-conductor-db-sync-v2pzq\" (UID: \"6d733f09-6f54-4b02-b44e-81bc02321044\") " pod="openstack/nova-cell1-conductor-db-sync-v2pzq" Dec 13 06:51:39 crc kubenswrapper[5048]: I1213 06:51:39.164987 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d733f09-6f54-4b02-b44e-81bc02321044-config-data\") pod \"nova-cell1-conductor-db-sync-v2pzq\" (UID: \"6d733f09-6f54-4b02-b44e-81bc02321044\") " pod="openstack/nova-cell1-conductor-db-sync-v2pzq" Dec 13 06:51:39 crc kubenswrapper[5048]: I1213 06:51:39.165744 5048 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d733f09-6f54-4b02-b44e-81bc02321044-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-v2pzq\" (UID: \"6d733f09-6f54-4b02-b44e-81bc02321044\") " pod="openstack/nova-cell1-conductor-db-sync-v2pzq" Dec 13 06:51:39 crc kubenswrapper[5048]: I1213 06:51:39.179118 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x9lsp\" (UniqueName: \"kubernetes.io/projected/6d733f09-6f54-4b02-b44e-81bc02321044-kube-api-access-x9lsp\") pod \"nova-cell1-conductor-db-sync-v2pzq\" (UID: \"6d733f09-6f54-4b02-b44e-81bc02321044\") " pod="openstack/nova-cell1-conductor-db-sync-v2pzq" Dec 13 06:51:39 crc kubenswrapper[5048]: I1213 06:51:39.343486 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-v2pzq" Dec 13 06:51:39 crc kubenswrapper[5048]: I1213 06:51:39.776990 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-npmvk" event={"ID":"8b3c1380-a6bf-4bf2-b648-1e548fb07c12","Type":"ContainerStarted","Data":"2b9a2f24de943544ad9499b48208b1f382c3bfa4b625bdf0af56acfb6bd6232a"} Dec 13 06:51:39 crc kubenswrapper[5048]: I1213 06:51:39.781024 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3a656ea3-a5ba-4fbc-ba4b-4c220207e764","Type":"ContainerStarted","Data":"84c6c8e267e78c32fddef76a41bb65f605046fb99c61f0ecd8ce35061e425fa0"} Dec 13 06:51:39 crc kubenswrapper[5048]: I1213 06:51:39.797506 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-v2pzq"] Dec 13 06:51:39 crc kubenswrapper[5048]: I1213 06:51:39.806075 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"233040b0-ce2e-4fe7-8863-9093c75e3e7a","Type":"ContainerStarted","Data":"1df6776915fd8077262b7ad37482527c89e9e5836babaf83956f7a2a648059cf"} Dec 13 06:51:39 crc kubenswrapper[5048]: I1213 06:51:39.808937 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-npmvk" podStartSLOduration=2.808918882 podStartE2EDuration="2.808918882s" podCreationTimestamp="2025-12-13 06:51:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:51:39.797404547 +0000 UTC m=+1333.663999128" watchObservedRunningTime="2025-12-13 06:51:39.808918882 +0000 UTC m=+1333.675513463" Dec 13 06:51:39 crc kubenswrapper[5048]: I1213 06:51:39.818176 5048 generic.go:334] "Generic (PLEG): container finished" podID="4a966382-7a0b-439d-a1f0-9a08ae863aa0" containerID="bce8de951878a4eccea558410f2fadc4eaa5eb9ac83684e6de4f177084142724" exitCode=0 Dec 13 06:51:39 crc kubenswrapper[5048]: I1213 06:51:39.818247 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-mqjdq" event={"ID":"4a966382-7a0b-439d-a1f0-9a08ae863aa0","Type":"ContainerDied","Data":"bce8de951878a4eccea558410f2fadc4eaa5eb9ac83684e6de4f177084142724"} Dec 13 06:51:39 crc kubenswrapper[5048]: I1213 06:51:39.818277 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-mqjdq" event={"ID":"4a966382-7a0b-439d-a1f0-9a08ae863aa0","Type":"ContainerStarted","Data":"34210ed7cf36c3dced2c4e965ae55b2274d8af6214309c79dd0979980713a35c"} Dec 13 06:51:39 crc kubenswrapper[5048]: I1213 06:51:39.821708 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-api-0" event={"ID":"9360f872-f320-4ae7-a11c-5d859e76db22","Type":"ContainerStarted","Data":"d95ab645d8e41cdb0192433b15ee10c44edcd69496486ece92981107ac7f180f"} Dec 13 06:51:39 crc kubenswrapper[5048]: I1213 06:51:39.828038 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"5f1dc8fe-cba2-4a26-9d79-42f3be73edb3","Type":"ContainerStarted","Data":"ff7f49b667160ef35f0628eda22a212e48446bcfe5732e262a885d0668e7e20d"} Dec 13 06:51:39 crc kubenswrapper[5048]: W1213 06:51:39.837495 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d733f09_6f54_4b02_b44e_81bc02321044.slice/crio-722a4ba635a995add53d162a1c62d31f4a7e3cc665630d80141f99931737ccfa WatchSource:0}: Error finding container 722a4ba635a995add53d162a1c62d31f4a7e3cc665630d80141f99931737ccfa: Status 404 returned error can't find the container with id 722a4ba635a995add53d162a1c62d31f4a7e3cc665630d80141f99931737ccfa Dec 13 06:51:40 crc kubenswrapper[5048]: I1213 06:51:40.840746 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-v2pzq" event={"ID":"6d733f09-6f54-4b02-b44e-81bc02321044","Type":"ContainerStarted","Data":"e758178fd2b3c4b94a78172706bea64d76e51dee0040adad084fd4253868ed82"} Dec 13 06:51:40 crc kubenswrapper[5048]: I1213 06:51:40.842083 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-v2pzq" event={"ID":"6d733f09-6f54-4b02-b44e-81bc02321044","Type":"ContainerStarted","Data":"722a4ba635a995add53d162a1c62d31f4a7e3cc665630d80141f99931737ccfa"} Dec 13 06:51:40 crc kubenswrapper[5048]: I1213 06:51:40.850151 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-mqjdq" event={"ID":"4a966382-7a0b-439d-a1f0-9a08ae863aa0","Type":"ContainerStarted","Data":"b271d4fc477426e860c372ceadf63bedb721b7d48fae352ba3bf1095822d04c6"} Dec 13 06:51:40 crc kubenswrapper[5048]: I1213 06:51:40.862988 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-v2pzq" podStartSLOduration=2.862969813 podStartE2EDuration="2.862969813s" podCreationTimestamp="2025-12-13 06:51:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:51:40.857426802 +0000 UTC m=+1334.724021383" watchObservedRunningTime="2025-12-13 06:51:40.862969813 +0000 UTC m=+1334.729564394" Dec 13 06:51:40 crc kubenswrapper[5048]: I1213 06:51:40.886776 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-bccf8f775-mqjdq" podStartSLOduration=3.8867599630000003 podStartE2EDuration="3.886759963s" podCreationTimestamp="2025-12-13 06:51:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:51:40.880863652 +0000 UTC m=+1334.747458233" watchObservedRunningTime="2025-12-13 06:51:40.886759963 +0000 UTC m=+1334.753354544" Dec 13 06:51:41 crc kubenswrapper[5048]: I1213 06:51:41.441253 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Dec 13 06:51:41 crc kubenswrapper[5048]: I1213 06:51:41.451497 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 13 06:51:41 crc kubenswrapper[5048]: I1213 06:51:41.859886 5048 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openstack/dnsmasq-dns-bccf8f775-mqjdq" Dec 13 06:51:42 crc kubenswrapper[5048]: I1213 06:51:42.875244 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3a656ea3-a5ba-4fbc-ba4b-4c220207e764","Type":"ContainerStarted","Data":"740a7a6cd5b5ec8d9ce16bdb3b4678f966dbbf8c336035b2f68dda3cb2459d22"} Dec 13 06:51:42 crc kubenswrapper[5048]: I1213 06:51:42.882539 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"233040b0-ce2e-4fe7-8863-9093c75e3e7a","Type":"ContainerStarted","Data":"34f65462a5d92b583686ebaf6f7ba9685249faf91354fabb593541bdaf0bfff1"} Dec 13 06:51:42 crc kubenswrapper[5048]: I1213 06:51:42.882729 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="233040b0-ce2e-4fe7-8863-9093c75e3e7a" containerName="nova-metadata-log" containerID="cri-o://34f65462a5d92b583686ebaf6f7ba9685249faf91354fabb593541bdaf0bfff1" gracePeriod=30 Dec 13 06:51:42 crc kubenswrapper[5048]: I1213 06:51:42.883024 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="233040b0-ce2e-4fe7-8863-9093c75e3e7a" containerName="nova-metadata-metadata" containerID="cri-o://16c8243e3ab3c804f6f643c78f667d6b578c6e92c17a2161214d8e3e51f3a441" gracePeriod=30 Dec 13 06:51:42 crc kubenswrapper[5048]: I1213 06:51:42.894230 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9360f872-f320-4ae7-a11c-5d859e76db22","Type":"ContainerStarted","Data":"e61ed9918f95fb0ab81c8c3351653ef25314136fd9f18eb9b8f2bcdb780f76d7"} Dec 13 06:51:42 crc kubenswrapper[5048]: I1213 06:51:42.894296 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9360f872-f320-4ae7-a11c-5d859e76db22","Type":"ContainerStarted","Data":"45ea782cfefb7ee828089f889ada280c0b1071d35bf6cca6a18c9d6e2b590cdb"} Dec 13 06:51:42 crc kubenswrapper[5048]: I1213 06:51:42.896675 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.582544589 podStartE2EDuration="5.896646853s" podCreationTimestamp="2025-12-13 06:51:37 +0000 UTC" firstStartedPulling="2025-12-13 06:51:38.983804927 +0000 UTC m=+1332.850399498" lastFinishedPulling="2025-12-13 06:51:42.297907181 +0000 UTC m=+1336.164501762" observedRunningTime="2025-12-13 06:51:42.893492458 +0000 UTC m=+1336.760087049" watchObservedRunningTime="2025-12-13 06:51:42.896646853 +0000 UTC m=+1336.763241444" Dec 13 06:51:42 crc kubenswrapper[5048]: I1213 06:51:42.900512 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="5f1dc8fe-cba2-4a26-9d79-42f3be73edb3" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://4dca945788a50969ff033b9d4931cb3df06cbcf96f0246ec4db9dcb9b3bcf8a1" gracePeriod=30 Dec 13 06:51:42 crc kubenswrapper[5048]: I1213 06:51:42.900757 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"5f1dc8fe-cba2-4a26-9d79-42f3be73edb3","Type":"ContainerStarted","Data":"4dca945788a50969ff033b9d4931cb3df06cbcf96f0246ec4db9dcb9b3bcf8a1"} Dec 13 06:51:42 crc kubenswrapper[5048]: I1213 06:51:42.925841 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.598997606 podStartE2EDuration="5.925808467s" podCreationTimestamp="2025-12-13 
06:51:37 +0000 UTC" firstStartedPulling="2025-12-13 06:51:38.984059964 +0000 UTC m=+1332.850654545" lastFinishedPulling="2025-12-13 06:51:42.310870825 +0000 UTC m=+1336.177465406" observedRunningTime="2025-12-13 06:51:42.916019347 +0000 UTC m=+1336.782613948" watchObservedRunningTime="2025-12-13 06:51:42.925808467 +0000 UTC m=+1336.792403048" Dec 13 06:51:42 crc kubenswrapper[5048]: I1213 06:51:42.951597 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.460802831 podStartE2EDuration="5.951560492s" podCreationTimestamp="2025-12-13 06:51:37 +0000 UTC" firstStartedPulling="2025-12-13 06:51:38.808149928 +0000 UTC m=+1332.674744509" lastFinishedPulling="2025-12-13 06:51:42.298907589 +0000 UTC m=+1336.165502170" observedRunningTime="2025-12-13 06:51:42.93191787 +0000 UTC m=+1336.798512451" watchObservedRunningTime="2025-12-13 06:51:42.951560492 +0000 UTC m=+1336.818155083" Dec 13 06:51:42 crc kubenswrapper[5048]: I1213 06:51:42.961804 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.743436946 podStartE2EDuration="5.961784604s" podCreationTimestamp="2025-12-13 06:51:37 +0000 UTC" firstStartedPulling="2025-12-13 06:51:39.079955784 +0000 UTC m=+1332.946550365" lastFinishedPulling="2025-12-13 06:51:42.298303442 +0000 UTC m=+1336.164898023" observedRunningTime="2025-12-13 06:51:42.956566535 +0000 UTC m=+1336.823161126" watchObservedRunningTime="2025-12-13 06:51:42.961784604 +0000 UTC m=+1336.828379185" Dec 13 06:51:43 crc kubenswrapper[5048]: I1213 06:51:43.145550 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Dec 13 06:51:43 crc kubenswrapper[5048]: I1213 06:51:43.177668 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Dec 13 06:51:43 crc kubenswrapper[5048]: I1213 06:51:43.177722 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Dec 13 06:51:43 crc kubenswrapper[5048]: I1213 06:51:43.184969 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Dec 13 06:51:43 crc kubenswrapper[5048]: I1213 06:51:43.910235 5048 generic.go:334] "Generic (PLEG): container finished" podID="233040b0-ce2e-4fe7-8863-9093c75e3e7a" containerID="34f65462a5d92b583686ebaf6f7ba9685249faf91354fabb593541bdaf0bfff1" exitCode=143 Dec 13 06:51:43 crc kubenswrapper[5048]: I1213 06:51:43.910753 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"233040b0-ce2e-4fe7-8863-9093c75e3e7a","Type":"ContainerStarted","Data":"16c8243e3ab3c804f6f643c78f667d6b578c6e92c17a2161214d8e3e51f3a441"} Dec 13 06:51:43 crc kubenswrapper[5048]: I1213 06:51:43.910870 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"233040b0-ce2e-4fe7-8863-9093c75e3e7a","Type":"ContainerDied","Data":"34f65462a5d92b583686ebaf6f7ba9685249faf91354fabb593541bdaf0bfff1"} Dec 13 06:51:44 crc kubenswrapper[5048]: I1213 06:51:44.813890 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Dec 13 06:51:46 crc kubenswrapper[5048]: I1213 06:51:46.938234 5048 generic.go:334] "Generic (PLEG): container finished" podID="6d733f09-6f54-4b02-b44e-81bc02321044" containerID="e758178fd2b3c4b94a78172706bea64d76e51dee0040adad084fd4253868ed82" exitCode=0 Dec 13 06:51:46 crc 
Dec 13 06:51:46 crc kubenswrapper[5048]: I1213 06:51:46.938349 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-v2pzq" event={"ID":"6d733f09-6f54-4b02-b44e-81bc02321044","Type":"ContainerDied","Data":"e758178fd2b3c4b94a78172706bea64d76e51dee0040adad084fd4253868ed82"}
Dec 13 06:51:46 crc kubenswrapper[5048]: I1213 06:51:46.942181 5048 generic.go:334] "Generic (PLEG): container finished" podID="8b3c1380-a6bf-4bf2-b648-1e548fb07c12" containerID="2b9a2f24de943544ad9499b48208b1f382c3bfa4b625bdf0af56acfb6bd6232a" exitCode=0
Dec 13 06:51:46 crc kubenswrapper[5048]: I1213 06:51:46.942349 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-npmvk" event={"ID":"8b3c1380-a6bf-4bf2-b648-1e548fb07c12","Type":"ContainerDied","Data":"2b9a2f24de943544ad9499b48208b1f382c3bfa4b625bdf0af56acfb6bd6232a"}
Dec 13 06:51:48 crc kubenswrapper[5048]: I1213 06:51:48.185741 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Dec 13 06:51:48 crc kubenswrapper[5048]: I1213 06:51:48.238006 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Dec 13 06:51:48 crc kubenswrapper[5048]: I1213 06:51:48.327720 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-npmvk"
Dec 13 06:51:48 crc kubenswrapper[5048]: I1213 06:51:48.383100 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Dec 13 06:51:48 crc kubenswrapper[5048]: I1213 06:51:48.383134 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Dec 13 06:51:48 crc kubenswrapper[5048]: I1213 06:51:48.400420 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-bccf8f775-mqjdq"
Dec 13 06:51:48 crc kubenswrapper[5048]: I1213 06:51:48.500131 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b3c1380-a6bf-4bf2-b648-1e548fb07c12-combined-ca-bundle\") pod \"8b3c1380-a6bf-4bf2-b648-1e548fb07c12\" (UID: \"8b3c1380-a6bf-4bf2-b648-1e548fb07c12\") "
Dec 13 06:51:48 crc kubenswrapper[5048]: I1213 06:51:48.500206 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8b3c1380-a6bf-4bf2-b648-1e548fb07c12-scripts\") pod \"8b3c1380-a6bf-4bf2-b648-1e548fb07c12\" (UID: \"8b3c1380-a6bf-4bf2-b648-1e548fb07c12\") "
Dec 13 06:51:48 crc kubenswrapper[5048]: I1213 06:51:48.500403 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hqztk\" (UniqueName: \"kubernetes.io/projected/8b3c1380-a6bf-4bf2-b648-1e548fb07c12-kube-api-access-hqztk\") pod \"8b3c1380-a6bf-4bf2-b648-1e548fb07c12\" (UID: \"8b3c1380-a6bf-4bf2-b648-1e548fb07c12\") "
Dec 13 06:51:48 crc kubenswrapper[5048]: I1213 06:51:48.500521 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b3c1380-a6bf-4bf2-b648-1e548fb07c12-config-data\") pod \"8b3c1380-a6bf-4bf2-b648-1e548fb07c12\" (UID: \"8b3c1380-a6bf-4bf2-b648-1e548fb07c12\") "
Dec 13 06:51:48 crc kubenswrapper[5048]: I1213 06:51:48.535627 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b3c1380-a6bf-4bf2-b648-1e548fb07c12-scripts" (OuterVolumeSpecName: "scripts") pod "8b3c1380-a6bf-4bf2-b648-1e548fb07c12" (UID: "8b3c1380-a6bf-4bf2-b648-1e548fb07c12"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:51:48 crc kubenswrapper[5048]: I1213 06:51:48.538110 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-v2pzq"
Dec 13 06:51:48 crc kubenswrapper[5048]: I1213 06:51:48.540126 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b3c1380-a6bf-4bf2-b648-1e548fb07c12-kube-api-access-hqztk" (OuterVolumeSpecName: "kube-api-access-hqztk") pod "8b3c1380-a6bf-4bf2-b648-1e548fb07c12" (UID: "8b3c1380-a6bf-4bf2-b648-1e548fb07c12"). InnerVolumeSpecName "kube-api-access-hqztk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:51:48 crc kubenswrapper[5048]: I1213 06:51:48.564495 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-qhcq5"]
Dec 13 06:51:48 crc kubenswrapper[5048]: I1213 06:51:48.564738 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6578955fd5-qhcq5" podUID="a2040734-c367-448b-a7ce-762b168b35c0" containerName="dnsmasq-dns" containerID="cri-o://05fa0bd6ec99bd6dc21bd959fcc82c9750715f411ddc755c2126da1d1d995b25" gracePeriod=10
Dec 13 06:51:48 crc kubenswrapper[5048]: I1213 06:51:48.580335 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6578955fd5-qhcq5" podUID="a2040734-c367-448b-a7ce-762b168b35c0" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.161:5353: connect: connection refused"
Dec 13 06:51:48 crc kubenswrapper[5048]: I1213 06:51:48.602779 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d733f09-6f54-4b02-b44e-81bc02321044-scripts\") pod \"6d733f09-6f54-4b02-b44e-81bc02321044\" (UID: \"6d733f09-6f54-4b02-b44e-81bc02321044\") "
Dec 13 06:51:48 crc kubenswrapper[5048]: I1213 06:51:48.602838 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x9lsp\" (UniqueName: \"kubernetes.io/projected/6d733f09-6f54-4b02-b44e-81bc02321044-kube-api-access-x9lsp\") pod \"6d733f09-6f54-4b02-b44e-81bc02321044\" (UID: \"6d733f09-6f54-4b02-b44e-81bc02321044\") "
Dec 13 06:51:48 crc kubenswrapper[5048]: I1213 06:51:48.602907 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d733f09-6f54-4b02-b44e-81bc02321044-config-data\") pod \"6d733f09-6f54-4b02-b44e-81bc02321044\" (UID: \"6d733f09-6f54-4b02-b44e-81bc02321044\") "
Dec 13 06:51:48 crc kubenswrapper[5048]: I1213 06:51:48.602977 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d733f09-6f54-4b02-b44e-81bc02321044-combined-ca-bundle\") pod \"6d733f09-6f54-4b02-b44e-81bc02321044\" (UID: \"6d733f09-6f54-4b02-b44e-81bc02321044\") "
Dec 13 06:51:48 crc kubenswrapper[5048]: I1213 06:51:48.604718 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hqztk\" (UniqueName: \"kubernetes.io/projected/8b3c1380-a6bf-4bf2-b648-1e548fb07c12-kube-api-access-hqztk\") on node \"crc\" DevicePath \"\""
\"kubernetes.io/secret/8b3c1380-a6bf-4bf2-b648-1e548fb07c12-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:48 crc kubenswrapper[5048]: I1213 06:51:48.617643 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b3c1380-a6bf-4bf2-b648-1e548fb07c12-config-data" (OuterVolumeSpecName: "config-data") pod "8b3c1380-a6bf-4bf2-b648-1e548fb07c12" (UID: "8b3c1380-a6bf-4bf2-b648-1e548fb07c12"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:51:48 crc kubenswrapper[5048]: I1213 06:51:48.617683 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d733f09-6f54-4b02-b44e-81bc02321044-kube-api-access-x9lsp" (OuterVolumeSpecName: "kube-api-access-x9lsp") pod "6d733f09-6f54-4b02-b44e-81bc02321044" (UID: "6d733f09-6f54-4b02-b44e-81bc02321044"). InnerVolumeSpecName "kube-api-access-x9lsp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:51:48 crc kubenswrapper[5048]: I1213 06:51:48.633880 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d733f09-6f54-4b02-b44e-81bc02321044-scripts" (OuterVolumeSpecName: "scripts") pod "6d733f09-6f54-4b02-b44e-81bc02321044" (UID: "6d733f09-6f54-4b02-b44e-81bc02321044"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:51:48 crc kubenswrapper[5048]: I1213 06:51:48.647574 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b3c1380-a6bf-4bf2-b648-1e548fb07c12-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8b3c1380-a6bf-4bf2-b648-1e548fb07c12" (UID: "8b3c1380-a6bf-4bf2-b648-1e548fb07c12"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:51:48 crc kubenswrapper[5048]: I1213 06:51:48.659769 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d733f09-6f54-4b02-b44e-81bc02321044-config-data" (OuterVolumeSpecName: "config-data") pod "6d733f09-6f54-4b02-b44e-81bc02321044" (UID: "6d733f09-6f54-4b02-b44e-81bc02321044"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:51:48 crc kubenswrapper[5048]: I1213 06:51:48.718185 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b3c1380-a6bf-4bf2-b648-1e548fb07c12-config-data\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:48 crc kubenswrapper[5048]: I1213 06:51:48.718223 5048 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b3c1380-a6bf-4bf2-b648-1e548fb07c12-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:48 crc kubenswrapper[5048]: I1213 06:51:48.718241 5048 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d733f09-6f54-4b02-b44e-81bc02321044-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:48 crc kubenswrapper[5048]: I1213 06:51:48.718251 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x9lsp\" (UniqueName: \"kubernetes.io/projected/6d733f09-6f54-4b02-b44e-81bc02321044-kube-api-access-x9lsp\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:48 crc kubenswrapper[5048]: I1213 06:51:48.718282 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d733f09-6f54-4b02-b44e-81bc02321044-config-data\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:48 crc kubenswrapper[5048]: I1213 06:51:48.755994 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d733f09-6f54-4b02-b44e-81bc02321044-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6d733f09-6f54-4b02-b44e-81bc02321044" (UID: "6d733f09-6f54-4b02-b44e-81bc02321044"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:51:48 crc kubenswrapper[5048]: I1213 06:51:48.758347 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 13 06:51:48 crc kubenswrapper[5048]: I1213 06:51:48.758631 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="97a8bb68-6317-4a75-bf87-f7d6c6cb0023" containerName="kube-state-metrics" containerID="cri-o://b21d600cfe018fabe4afb069145ecc9205ae4c9d25c56bde4b7f4f3dd6e059c2" gracePeriod=30 Dec 13 06:51:48 crc kubenswrapper[5048]: I1213 06:51:48.821407 5048 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d733f09-6f54-4b02-b44e-81bc02321044-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.027236 5048 generic.go:334] "Generic (PLEG): container finished" podID="a2040734-c367-448b-a7ce-762b168b35c0" containerID="05fa0bd6ec99bd6dc21bd959fcc82c9750715f411ddc755c2126da1d1d995b25" exitCode=0 Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.027314 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-qhcq5" event={"ID":"a2040734-c367-448b-a7ce-762b168b35c0","Type":"ContainerDied","Data":"05fa0bd6ec99bd6dc21bd959fcc82c9750715f411ddc755c2126da1d1d995b25"} Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.036070 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-npmvk" event={"ID":"8b3c1380-a6bf-4bf2-b648-1e548fb07c12","Type":"ContainerDied","Data":"a88ed9619b2a5bda5a3d73d49441f19eaa89c5ba42800c29d01f1d4038409f25"} Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 
Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.036107 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a88ed9619b2a5bda5a3d73d49441f19eaa89c5ba42800c29d01f1d4038409f25"
Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.036187 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-npmvk"
Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.055667 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-v2pzq" event={"ID":"6d733f09-6f54-4b02-b44e-81bc02321044","Type":"ContainerDied","Data":"722a4ba635a995add53d162a1c62d31f4a7e3cc665630d80141f99931737ccfa"}
Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.055706 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="722a4ba635a995add53d162a1c62d31f4a7e3cc665630d80141f99931737ccfa"
Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.055775 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-v2pzq"
Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.063456 5048 generic.go:334] "Generic (PLEG): container finished" podID="97a8bb68-6317-4a75-bf87-f7d6c6cb0023" containerID="b21d600cfe018fabe4afb069145ecc9205ae4c9d25c56bde4b7f4f3dd6e059c2" exitCode=2
Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.064878 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"97a8bb68-6317-4a75-bf87-f7d6c6cb0023","Type":"ContainerDied","Data":"b21d600cfe018fabe4afb069145ecc9205ae4c9d25c56bde4b7f4f3dd6e059c2"}
Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.078090 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"]
Dec 13 06:51:49 crc kubenswrapper[5048]: E1213 06:51:49.078541 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b3c1380-a6bf-4bf2-b648-1e548fb07c12" containerName="nova-manage"
Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.078564 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b3c1380-a6bf-4bf2-b648-1e548fb07c12" containerName="nova-manage"
Dec 13 06:51:49 crc kubenswrapper[5048]: E1213 06:51:49.078592 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d733f09-6f54-4b02-b44e-81bc02321044" containerName="nova-cell1-conductor-db-sync"
Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.078599 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d733f09-6f54-4b02-b44e-81bc02321044" containerName="nova-cell1-conductor-db-sync"
Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.078827 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b3c1380-a6bf-4bf2-b648-1e548fb07c12" containerName="nova-manage"
Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.078851 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d733f09-6f54-4b02-b44e-81bc02321044" containerName="nova-cell1-conductor-db-sync"
Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.079456 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.085614 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data"
Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.115606 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.127884 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/219c43b9-12af-4e67-9c5e-93b2e83623b1-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"219c43b9-12af-4e67-9c5e-93b2e83623b1\") " pod="openstack/nova-cell1-conductor-0"
Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.128188 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7mzjs\" (UniqueName: \"kubernetes.io/projected/219c43b9-12af-4e67-9c5e-93b2e83623b1-kube-api-access-7mzjs\") pod \"nova-cell1-conductor-0\" (UID: \"219c43b9-12af-4e67-9c5e-93b2e83623b1\") " pod="openstack/nova-cell1-conductor-0"
Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.128308 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/219c43b9-12af-4e67-9c5e-93b2e83623b1-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"219c43b9-12af-4e67-9c5e-93b2e83623b1\") " pod="openstack/nova-cell1-conductor-0"
Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.145231 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.172487 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.172782 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="9360f872-f320-4ae7-a11c-5d859e76db22" containerName="nova-api-log" containerID="cri-o://45ea782cfefb7ee828089f889ada280c0b1071d35bf6cca6a18c9d6e2b590cdb" gracePeriod=30
Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.172951 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="9360f872-f320-4ae7-a11c-5d859e76db22" containerName="nova-api-api" containerID="cri-o://e61ed9918f95fb0ab81c8c3351653ef25314136fd9f18eb9b8f2bcdb780f76d7" gracePeriod=30
Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.175590 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-qhcq5"
Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.182659 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.200009 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="9360f872-f320-4ae7-a11c-5d859e76db22" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.186:8774/\": EOF"
Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.200402 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="9360f872-f320-4ae7-a11c-5d859e76db22" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.186:8774/\": EOF"
Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.231031 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a2040734-c367-448b-a7ce-762b168b35c0-dns-svc\") pod \"a2040734-c367-448b-a7ce-762b168b35c0\" (UID: \"a2040734-c367-448b-a7ce-762b168b35c0\") "
Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.231094 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8sx6f\" (UniqueName: \"kubernetes.io/projected/a2040734-c367-448b-a7ce-762b168b35c0-kube-api-access-8sx6f\") pod \"a2040734-c367-448b-a7ce-762b168b35c0\" (UID: \"a2040734-c367-448b-a7ce-762b168b35c0\") "
Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.231187 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a2040734-c367-448b-a7ce-762b168b35c0-ovsdbserver-nb\") pod \"a2040734-c367-448b-a7ce-762b168b35c0\" (UID: \"a2040734-c367-448b-a7ce-762b168b35c0\") "
Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.231307 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a2040734-c367-448b-a7ce-762b168b35c0-ovsdbserver-sb\") pod \"a2040734-c367-448b-a7ce-762b168b35c0\" (UID: \"a2040734-c367-448b-a7ce-762b168b35c0\") "
Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.231365 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2040734-c367-448b-a7ce-762b168b35c0-config\") pod \"a2040734-c367-448b-a7ce-762b168b35c0\" (UID: \"a2040734-c367-448b-a7ce-762b168b35c0\") "
Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.231498 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a2040734-c367-448b-a7ce-762b168b35c0-dns-swift-storage-0\") pod \"a2040734-c367-448b-a7ce-762b168b35c0\" (UID: \"a2040734-c367-448b-a7ce-762b168b35c0\") "
Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.231852 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7mzjs\" (UniqueName: \"kubernetes.io/projected/219c43b9-12af-4e67-9c5e-93b2e83623b1-kube-api-access-7mzjs\") pod \"nova-cell1-conductor-0\" (UID: \"219c43b9-12af-4e67-9c5e-93b2e83623b1\") " pod="openstack/nova-cell1-conductor-0"
\"kubernetes.io/secret/219c43b9-12af-4e67-9c5e-93b2e83623b1-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"219c43b9-12af-4e67-9c5e-93b2e83623b1\") " pod="openstack/nova-cell1-conductor-0" Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.231997 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/219c43b9-12af-4e67-9c5e-93b2e83623b1-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"219c43b9-12af-4e67-9c5e-93b2e83623b1\") " pod="openstack/nova-cell1-conductor-0" Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.255045 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/219c43b9-12af-4e67-9c5e-93b2e83623b1-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"219c43b9-12af-4e67-9c5e-93b2e83623b1\") " pod="openstack/nova-cell1-conductor-0" Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.255794 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/219c43b9-12af-4e67-9c5e-93b2e83623b1-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"219c43b9-12af-4e67-9c5e-93b2e83623b1\") " pod="openstack/nova-cell1-conductor-0" Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.258099 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a2040734-c367-448b-a7ce-762b168b35c0-kube-api-access-8sx6f" (OuterVolumeSpecName: "kube-api-access-8sx6f") pod "a2040734-c367-448b-a7ce-762b168b35c0" (UID: "a2040734-c367-448b-a7ce-762b168b35c0"). InnerVolumeSpecName "kube-api-access-8sx6f". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.284966 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7mzjs\" (UniqueName: \"kubernetes.io/projected/219c43b9-12af-4e67-9c5e-93b2e83623b1-kube-api-access-7mzjs\") pod \"nova-cell1-conductor-0\" (UID: \"219c43b9-12af-4e67-9c5e-93b2e83623b1\") " pod="openstack/nova-cell1-conductor-0" Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.333185 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8sx6f\" (UniqueName: \"kubernetes.io/projected/a2040734-c367-448b-a7ce-762b168b35c0-kube-api-access-8sx6f\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.340281 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a2040734-c367-448b-a7ce-762b168b35c0-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "a2040734-c367-448b-a7ce-762b168b35c0" (UID: "a2040734-c367-448b-a7ce-762b168b35c0"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.398825 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a2040734-c367-448b-a7ce-762b168b35c0-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "a2040734-c367-448b-a7ce-762b168b35c0" (UID: "a2040734-c367-448b-a7ce-762b168b35c0"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.404044 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a2040734-c367-448b-a7ce-762b168b35c0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a2040734-c367-448b-a7ce-762b168b35c0" (UID: "a2040734-c367-448b-a7ce-762b168b35c0"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.417900 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a2040734-c367-448b-a7ce-762b168b35c0-config" (OuterVolumeSpecName: "config") pod "a2040734-c367-448b-a7ce-762b168b35c0" (UID: "a2040734-c367-448b-a7ce-762b168b35c0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.430650 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a2040734-c367-448b-a7ce-762b168b35c0-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a2040734-c367-448b-a7ce-762b168b35c0" (UID: "a2040734-c367-448b-a7ce-762b168b35c0"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.434094 5048 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a2040734-c367-448b-a7ce-762b168b35c0-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.434124 5048 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a2040734-c367-448b-a7ce-762b168b35c0-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.434135 5048 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a2040734-c367-448b-a7ce-762b168b35c0-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.434144 5048 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a2040734-c367-448b-a7ce-762b168b35c0-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.434152 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2040734-c367-448b-a7ce-762b168b35c0-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.441652 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.530269 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.536058 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7mlj5\" (UniqueName: \"kubernetes.io/projected/97a8bb68-6317-4a75-bf87-f7d6c6cb0023-kube-api-access-7mlj5\") pod \"97a8bb68-6317-4a75-bf87-f7d6c6cb0023\" (UID: \"97a8bb68-6317-4a75-bf87-f7d6c6cb0023\") " Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.542741 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97a8bb68-6317-4a75-bf87-f7d6c6cb0023-kube-api-access-7mlj5" (OuterVolumeSpecName: "kube-api-access-7mlj5") pod "97a8bb68-6317-4a75-bf87-f7d6c6cb0023" (UID: "97a8bb68-6317-4a75-bf87-f7d6c6cb0023"). InnerVolumeSpecName "kube-api-access-7mlj5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.640235 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7mlj5\" (UniqueName: \"kubernetes.io/projected/97a8bb68-6317-4a75-bf87-f7d6c6cb0023-kube-api-access-7mlj5\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:49 crc kubenswrapper[5048]: I1213 06:51:49.925491 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.075003 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.075007 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"97a8bb68-6317-4a75-bf87-f7d6c6cb0023","Type":"ContainerDied","Data":"48fab155b42031a7f20ac61fc1eb8c77b442ac3fd7840f80718b9e14204f7bb2"} Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.075629 5048 scope.go:117] "RemoveContainer" containerID="b21d600cfe018fabe4afb069145ecc9205ae4c9d25c56bde4b7f4f3dd6e059c2" Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.085000 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-qhcq5" event={"ID":"a2040734-c367-448b-a7ce-762b168b35c0","Type":"ContainerDied","Data":"fef5f6e239832aac522e81afb92fe5e16fbb5fd20a1c0569000040fcc92cf29f"} Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.085114 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-qhcq5" Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.088223 5048 generic.go:334] "Generic (PLEG): container finished" podID="9360f872-f320-4ae7-a11c-5d859e76db22" containerID="45ea782cfefb7ee828089f889ada280c0b1071d35bf6cca6a18c9d6e2b590cdb" exitCode=143 Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.088355 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9360f872-f320-4ae7-a11c-5d859e76db22","Type":"ContainerDied","Data":"45ea782cfefb7ee828089f889ada280c0b1071d35bf6cca6a18c9d6e2b590cdb"} Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.097788 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"219c43b9-12af-4e67-9c5e-93b2e83623b1","Type":"ContainerStarted","Data":"9cf58e0436c0d5787283afd4d050baedb2c75b7bb9ebfd01c82431d58a89e821"} Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.123235 5048 scope.go:117] "RemoveContainer" containerID="05fa0bd6ec99bd6dc21bd959fcc82c9750715f411ddc755c2126da1d1d995b25" Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.130566 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.147828 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.159311 5048 scope.go:117] "RemoveContainer" containerID="c5f8ffa3ddf51746ecbf0912f6ff78e57d86772929748881e281351d8c2bf856" Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.165061 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Dec 13 06:51:50 crc kubenswrapper[5048]: E1213 06:51:50.165606 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97a8bb68-6317-4a75-bf87-f7d6c6cb0023" containerName="kube-state-metrics" Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.165625 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="97a8bb68-6317-4a75-bf87-f7d6c6cb0023" containerName="kube-state-metrics" Dec 13 06:51:50 crc kubenswrapper[5048]: E1213 06:51:50.165644 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2040734-c367-448b-a7ce-762b168b35c0" containerName="init" Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.165652 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2040734-c367-448b-a7ce-762b168b35c0" containerName="init" Dec 13 06:51:50 crc kubenswrapper[5048]: E1213 06:51:50.165677 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2040734-c367-448b-a7ce-762b168b35c0" containerName="dnsmasq-dns" Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.165685 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2040734-c367-448b-a7ce-762b168b35c0" containerName="dnsmasq-dns" Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.165897 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="97a8bb68-6317-4a75-bf87-f7d6c6cb0023" containerName="kube-state-metrics" Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.165914 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2040734-c367-448b-a7ce-762b168b35c0" containerName="dnsmasq-dns" Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.166758 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.169673 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.170248 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.182579 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-qhcq5"] Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.201504 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-qhcq5"] Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.208299 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.353493 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/c1e7f131-1d2e-41de-8cba-e54b383324c5-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"c1e7f131-1d2e-41de-8cba-e54b383324c5\") " pod="openstack/kube-state-metrics-0" Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.353646 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1e7f131-1d2e-41de-8cba-e54b383324c5-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"c1e7f131-1d2e-41de-8cba-e54b383324c5\") " pod="openstack/kube-state-metrics-0" Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.353703 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kcjsm\" (UniqueName: \"kubernetes.io/projected/c1e7f131-1d2e-41de-8cba-e54b383324c5-kube-api-access-kcjsm\") pod \"kube-state-metrics-0\" (UID: \"c1e7f131-1d2e-41de-8cba-e54b383324c5\") " pod="openstack/kube-state-metrics-0" Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.353721 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/c1e7f131-1d2e-41de-8cba-e54b383324c5-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"c1e7f131-1d2e-41de-8cba-e54b383324c5\") " pod="openstack/kube-state-metrics-0" Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.455770 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/c1e7f131-1d2e-41de-8cba-e54b383324c5-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"c1e7f131-1d2e-41de-8cba-e54b383324c5\") " pod="openstack/kube-state-metrics-0" Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.455882 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1e7f131-1d2e-41de-8cba-e54b383324c5-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"c1e7f131-1d2e-41de-8cba-e54b383324c5\") " pod="openstack/kube-state-metrics-0" Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.455951 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kcjsm\" (UniqueName: 
\"kubernetes.io/projected/c1e7f131-1d2e-41de-8cba-e54b383324c5-kube-api-access-kcjsm\") pod \"kube-state-metrics-0\" (UID: \"c1e7f131-1d2e-41de-8cba-e54b383324c5\") " pod="openstack/kube-state-metrics-0" Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.455977 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/c1e7f131-1d2e-41de-8cba-e54b383324c5-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"c1e7f131-1d2e-41de-8cba-e54b383324c5\") " pod="openstack/kube-state-metrics-0" Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.460530 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1e7f131-1d2e-41de-8cba-e54b383324c5-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"c1e7f131-1d2e-41de-8cba-e54b383324c5\") " pod="openstack/kube-state-metrics-0" Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.461050 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/c1e7f131-1d2e-41de-8cba-e54b383324c5-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"c1e7f131-1d2e-41de-8cba-e54b383324c5\") " pod="openstack/kube-state-metrics-0" Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.474101 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/c1e7f131-1d2e-41de-8cba-e54b383324c5-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"c1e7f131-1d2e-41de-8cba-e54b383324c5\") " pod="openstack/kube-state-metrics-0" Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.475483 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kcjsm\" (UniqueName: \"kubernetes.io/projected/c1e7f131-1d2e-41de-8cba-e54b383324c5-kube-api-access-kcjsm\") pod \"kube-state-metrics-0\" (UID: \"c1e7f131-1d2e-41de-8cba-e54b383324c5\") " pod="openstack/kube-state-metrics-0" Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.534647 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.582633 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="97a8bb68-6317-4a75-bf87-f7d6c6cb0023" path="/var/lib/kubelet/pods/97a8bb68-6317-4a75-bf87-f7d6c6cb0023/volumes" Dec 13 06:51:50 crc kubenswrapper[5048]: I1213 06:51:50.583160 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a2040734-c367-448b-a7ce-762b168b35c0" path="/var/lib/kubelet/pods/a2040734-c367-448b-a7ce-762b168b35c0/volumes" Dec 13 06:51:51 crc kubenswrapper[5048]: I1213 06:51:51.046526 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 13 06:51:51 crc kubenswrapper[5048]: I1213 06:51:51.054130 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 13 06:51:51 crc kubenswrapper[5048]: I1213 06:51:51.054564 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a5082b8c-ec48-4d9c-9204-45580d1bf111" containerName="ceilometer-central-agent" containerID="cri-o://857455881e008335cf61e00b4a6d2dcca52900ec662d5e1b8ac9bbc18fab1720" gracePeriod=30 Dec 13 06:51:51 crc kubenswrapper[5048]: I1213 06:51:51.055082 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a5082b8c-ec48-4d9c-9204-45580d1bf111" containerName="proxy-httpd" containerID="cri-o://c04c2e8e9183daf195f86aad9ba8ebf7a328a5b1563e869ca69be60640c63b10" gracePeriod=30 Dec 13 06:51:51 crc kubenswrapper[5048]: I1213 06:51:51.055130 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a5082b8c-ec48-4d9c-9204-45580d1bf111" containerName="sg-core" containerID="cri-o://898fd60076ad5d1260695eca7b413de892b7f327bb68edecf91eb32cd012b7da" gracePeriod=30 Dec 13 06:51:51 crc kubenswrapper[5048]: I1213 06:51:51.055165 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a5082b8c-ec48-4d9c-9204-45580d1bf111" containerName="ceilometer-notification-agent" containerID="cri-o://4e917bc1a59b3471fe195615dfaee6dbd99cfc10be426d1242794a73b46f4c48" gracePeriod=30 Dec 13 06:51:51 crc kubenswrapper[5048]: I1213 06:51:51.109377 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"c1e7f131-1d2e-41de-8cba-e54b383324c5","Type":"ContainerStarted","Data":"2744c49089e07c17ce05ad1de1ee1c0e548ef895ec1c0fde22e6883efc381067"} Dec 13 06:51:51 crc kubenswrapper[5048]: I1213 06:51:51.111972 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"219c43b9-12af-4e67-9c5e-93b2e83623b1","Type":"ContainerStarted","Data":"1cdc7ea0ec068a44e0d2d00138f830bc5d943fd1d6db1ab180d3989008466d13"} Dec 13 06:51:51 crc kubenswrapper[5048]: I1213 06:51:51.112102 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Dec 13 06:51:51 crc kubenswrapper[5048]: I1213 06:51:51.113574 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="3a656ea3-a5ba-4fbc-ba4b-4c220207e764" containerName="nova-scheduler-scheduler" containerID="cri-o://740a7a6cd5b5ec8d9ce16bdb3b4678f966dbbf8c336035b2f68dda3cb2459d22" gracePeriod=30 Dec 13 06:51:51 crc kubenswrapper[5048]: I1213 06:51:51.133828 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.133805727 podStartE2EDuration="2.133805727s" podCreationTimestamp="2025-12-13 06:51:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:51:51.129754459 +0000 UTC m=+1344.996349050" watchObservedRunningTime="2025-12-13 06:51:51.133805727 +0000 UTC m=+1345.000400308" Dec 13 06:51:52 crc kubenswrapper[5048]: I1213 06:51:52.125488 5048 generic.go:334] "Generic (PLEG): container finished" podID="a5082b8c-ec48-4d9c-9204-45580d1bf111" containerID="c04c2e8e9183daf195f86aad9ba8ebf7a328a5b1563e869ca69be60640c63b10" exitCode=0 Dec 13 06:51:52 crc kubenswrapper[5048]: I1213 06:51:52.125749 5048 generic.go:334] "Generic (PLEG): container finished" podID="a5082b8c-ec48-4d9c-9204-45580d1bf111" containerID="898fd60076ad5d1260695eca7b413de892b7f327bb68edecf91eb32cd012b7da" exitCode=2 Dec 13 06:51:52 crc kubenswrapper[5048]: I1213 06:51:52.125757 5048 generic.go:334] "Generic (PLEG): container finished" podID="a5082b8c-ec48-4d9c-9204-45580d1bf111" containerID="857455881e008335cf61e00b4a6d2dcca52900ec662d5e1b8ac9bbc18fab1720" exitCode=0 Dec 13 06:51:52 crc kubenswrapper[5048]: I1213 06:51:52.125519 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a5082b8c-ec48-4d9c-9204-45580d1bf111","Type":"ContainerDied","Data":"c04c2e8e9183daf195f86aad9ba8ebf7a328a5b1563e869ca69be60640c63b10"} Dec 13 06:51:52 crc kubenswrapper[5048]: I1213 06:51:52.125818 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a5082b8c-ec48-4d9c-9204-45580d1bf111","Type":"ContainerDied","Data":"898fd60076ad5d1260695eca7b413de892b7f327bb68edecf91eb32cd012b7da"} Dec 13 06:51:52 crc kubenswrapper[5048]: I1213 06:51:52.125829 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a5082b8c-ec48-4d9c-9204-45580d1bf111","Type":"ContainerDied","Data":"857455881e008335cf61e00b4a6d2dcca52900ec662d5e1b8ac9bbc18fab1720"} Dec 13 06:51:52 crc kubenswrapper[5048]: I1213 06:51:52.129063 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"c1e7f131-1d2e-41de-8cba-e54b383324c5","Type":"ContainerStarted","Data":"387d9be2e22ad8c0a15ad972927c00cc2ca4c99fb78f95a45d5dc8fb64de3271"} Dec 13 06:51:52 crc kubenswrapper[5048]: I1213 06:51:52.129351 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Dec 13 06:51:52 crc kubenswrapper[5048]: I1213 06:51:52.151346 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=1.697900754 podStartE2EDuration="2.151319738s" podCreationTimestamp="2025-12-13 06:51:50 +0000 UTC" firstStartedPulling="2025-12-13 06:51:51.067309729 +0000 UTC m=+1344.933904310" lastFinishedPulling="2025-12-13 06:51:51.520728713 +0000 UTC m=+1345.387323294" observedRunningTime="2025-12-13 06:51:52.147942189 +0000 UTC m=+1346.014536780" watchObservedRunningTime="2025-12-13 06:51:52.151319738 +0000 UTC m=+1346.017914319" Dec 13 06:51:53 crc kubenswrapper[5048]: E1213 06:51:53.187167 5048 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="740a7a6cd5b5ec8d9ce16bdb3b4678f966dbbf8c336035b2f68dda3cb2459d22" 
cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 13 06:51:53 crc kubenswrapper[5048]: E1213 06:51:53.189133 5048 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="740a7a6cd5b5ec8d9ce16bdb3b4678f966dbbf8c336035b2f68dda3cb2459d22" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 13 06:51:53 crc kubenswrapper[5048]: E1213 06:51:53.190569 5048 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="740a7a6cd5b5ec8d9ce16bdb3b4678f966dbbf8c336035b2f68dda3cb2459d22" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 13 06:51:53 crc kubenswrapper[5048]: E1213 06:51:53.190598 5048 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="3a656ea3-a5ba-4fbc-ba4b-4c220207e764" containerName="nova-scheduler-scheduler" Dec 13 06:51:54 crc kubenswrapper[5048]: I1213 06:51:54.149460 5048 generic.go:334] "Generic (PLEG): container finished" podID="3a656ea3-a5ba-4fbc-ba4b-4c220207e764" containerID="740a7a6cd5b5ec8d9ce16bdb3b4678f966dbbf8c336035b2f68dda3cb2459d22" exitCode=0 Dec 13 06:51:54 crc kubenswrapper[5048]: I1213 06:51:54.149696 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3a656ea3-a5ba-4fbc-ba4b-4c220207e764","Type":"ContainerDied","Data":"740a7a6cd5b5ec8d9ce16bdb3b4678f966dbbf8c336035b2f68dda3cb2459d22"} Dec 13 06:51:54 crc kubenswrapper[5048]: E1213 06:51:54.234498 5048 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3a656ea3_a5ba_4fbc_ba4b_4c220207e764.slice/crio-740a7a6cd5b5ec8d9ce16bdb3b4678f966dbbf8c336035b2f68dda3cb2459d22.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3a656ea3_a5ba_4fbc_ba4b_4c220207e764.slice/crio-conmon-740a7a6cd5b5ec8d9ce16bdb3b4678f966dbbf8c336035b2f68dda3cb2459d22.scope\": RecentStats: unable to find data in memory cache]" Dec 13 06:51:54 crc kubenswrapper[5048]: I1213 06:51:54.396689 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Dec 13 06:51:54 crc kubenswrapper[5048]: I1213 06:51:54.586436 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a656ea3-a5ba-4fbc-ba4b-4c220207e764-config-data\") pod \"3a656ea3-a5ba-4fbc-ba4b-4c220207e764\" (UID: \"3a656ea3-a5ba-4fbc-ba4b-4c220207e764\") " Dec 13 06:51:54 crc kubenswrapper[5048]: I1213 06:51:54.587015 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhkpq\" (UniqueName: \"kubernetes.io/projected/3a656ea3-a5ba-4fbc-ba4b-4c220207e764-kube-api-access-jhkpq\") pod \"3a656ea3-a5ba-4fbc-ba4b-4c220207e764\" (UID: \"3a656ea3-a5ba-4fbc-ba4b-4c220207e764\") " Dec 13 06:51:54 crc kubenswrapper[5048]: I1213 06:51:54.587131 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a656ea3-a5ba-4fbc-ba4b-4c220207e764-combined-ca-bundle\") pod \"3a656ea3-a5ba-4fbc-ba4b-4c220207e764\" (UID: \"3a656ea3-a5ba-4fbc-ba4b-4c220207e764\") " Dec 13 06:51:54 crc kubenswrapper[5048]: I1213 06:51:54.592493 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a656ea3-a5ba-4fbc-ba4b-4c220207e764-kube-api-access-jhkpq" (OuterVolumeSpecName: "kube-api-access-jhkpq") pod "3a656ea3-a5ba-4fbc-ba4b-4c220207e764" (UID: "3a656ea3-a5ba-4fbc-ba4b-4c220207e764"). InnerVolumeSpecName "kube-api-access-jhkpq". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:51:54 crc kubenswrapper[5048]: I1213 06:51:54.631101 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a656ea3-a5ba-4fbc-ba4b-4c220207e764-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3a656ea3-a5ba-4fbc-ba4b-4c220207e764" (UID: "3a656ea3-a5ba-4fbc-ba4b-4c220207e764"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:51:54 crc kubenswrapper[5048]: I1213 06:51:54.635693 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a656ea3-a5ba-4fbc-ba4b-4c220207e764-config-data" (OuterVolumeSpecName: "config-data") pod "3a656ea3-a5ba-4fbc-ba4b-4c220207e764" (UID: "3a656ea3-a5ba-4fbc-ba4b-4c220207e764"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:51:54 crc kubenswrapper[5048]: I1213 06:51:54.688927 5048 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a656ea3-a5ba-4fbc-ba4b-4c220207e764-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:54 crc kubenswrapper[5048]: I1213 06:51:54.688957 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a656ea3-a5ba-4fbc-ba4b-4c220207e764-config-data\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:54 crc kubenswrapper[5048]: I1213 06:51:54.688967 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhkpq\" (UniqueName: \"kubernetes.io/projected/3a656ea3-a5ba-4fbc-ba4b-4c220207e764-kube-api-access-jhkpq\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.012956 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.112349 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5082b8c-ec48-4d9c-9204-45580d1bf111-config-data\") pod \"a5082b8c-ec48-4d9c-9204-45580d1bf111\" (UID: \"a5082b8c-ec48-4d9c-9204-45580d1bf111\") " Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.112407 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a5082b8c-ec48-4d9c-9204-45580d1bf111-log-httpd\") pod \"a5082b8c-ec48-4d9c-9204-45580d1bf111\" (UID: \"a5082b8c-ec48-4d9c-9204-45580d1bf111\") " Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.112533 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5082b8c-ec48-4d9c-9204-45580d1bf111-combined-ca-bundle\") pod \"a5082b8c-ec48-4d9c-9204-45580d1bf111\" (UID: \"a5082b8c-ec48-4d9c-9204-45580d1bf111\") " Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.112567 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5082b8c-ec48-4d9c-9204-45580d1bf111-scripts\") pod \"a5082b8c-ec48-4d9c-9204-45580d1bf111\" (UID: \"a5082b8c-ec48-4d9c-9204-45580d1bf111\") " Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.112636 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a5082b8c-ec48-4d9c-9204-45580d1bf111-sg-core-conf-yaml\") pod \"a5082b8c-ec48-4d9c-9204-45580d1bf111\" (UID: \"a5082b8c-ec48-4d9c-9204-45580d1bf111\") " Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.112714 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4vjvp\" (UniqueName: \"kubernetes.io/projected/a5082b8c-ec48-4d9c-9204-45580d1bf111-kube-api-access-4vjvp\") pod \"a5082b8c-ec48-4d9c-9204-45580d1bf111\" (UID: \"a5082b8c-ec48-4d9c-9204-45580d1bf111\") " Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.112763 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a5082b8c-ec48-4d9c-9204-45580d1bf111-run-httpd\") pod \"a5082b8c-ec48-4d9c-9204-45580d1bf111\" (UID: \"a5082b8c-ec48-4d9c-9204-45580d1bf111\") " Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.113512 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a5082b8c-ec48-4d9c-9204-45580d1bf111-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "a5082b8c-ec48-4d9c-9204-45580d1bf111" (UID: "a5082b8c-ec48-4d9c-9204-45580d1bf111"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.114252 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a5082b8c-ec48-4d9c-9204-45580d1bf111-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "a5082b8c-ec48-4d9c-9204-45580d1bf111" (UID: "a5082b8c-ec48-4d9c-9204-45580d1bf111"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.117829 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5082b8c-ec48-4d9c-9204-45580d1bf111-scripts" (OuterVolumeSpecName: "scripts") pod "a5082b8c-ec48-4d9c-9204-45580d1bf111" (UID: "a5082b8c-ec48-4d9c-9204-45580d1bf111"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.118664 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5082b8c-ec48-4d9c-9204-45580d1bf111-kube-api-access-4vjvp" (OuterVolumeSpecName: "kube-api-access-4vjvp") pod "a5082b8c-ec48-4d9c-9204-45580d1bf111" (UID: "a5082b8c-ec48-4d9c-9204-45580d1bf111"). InnerVolumeSpecName "kube-api-access-4vjvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.139993 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5082b8c-ec48-4d9c-9204-45580d1bf111-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "a5082b8c-ec48-4d9c-9204-45580d1bf111" (UID: "a5082b8c-ec48-4d9c-9204-45580d1bf111"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.160504 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3a656ea3-a5ba-4fbc-ba4b-4c220207e764","Type":"ContainerDied","Data":"84c6c8e267e78c32fddef76a41bb65f605046fb99c61f0ecd8ce35061e425fa0"} Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.160593 5048 scope.go:117] "RemoveContainer" containerID="740a7a6cd5b5ec8d9ce16bdb3b4678f966dbbf8c336035b2f68dda3cb2459d22" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.162087 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.172267 5048 generic.go:334] "Generic (PLEG): container finished" podID="a5082b8c-ec48-4d9c-9204-45580d1bf111" containerID="4e917bc1a59b3471fe195615dfaee6dbd99cfc10be426d1242794a73b46f4c48" exitCode=0 Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.172303 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a5082b8c-ec48-4d9c-9204-45580d1bf111","Type":"ContainerDied","Data":"4e917bc1a59b3471fe195615dfaee6dbd99cfc10be426d1242794a73b46f4c48"} Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.172327 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a5082b8c-ec48-4d9c-9204-45580d1bf111","Type":"ContainerDied","Data":"14e820a9059db3c8bde22497c6b7bca2e3ffe71ab32d5c63a20a4c96feeb8786"} Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.172378 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.193983 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5082b8c-ec48-4d9c-9204-45580d1bf111-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a5082b8c-ec48-4d9c-9204-45580d1bf111" (UID: "a5082b8c-ec48-4d9c-9204-45580d1bf111"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.207600 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5082b8c-ec48-4d9c-9204-45580d1bf111-config-data" (OuterVolumeSpecName: "config-data") pod "a5082b8c-ec48-4d9c-9204-45580d1bf111" (UID: "a5082b8c-ec48-4d9c-9204-45580d1bf111"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.214243 5048 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a5082b8c-ec48-4d9c-9204-45580d1bf111-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.214265 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5082b8c-ec48-4d9c-9204-45580d1bf111-config-data\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.214273 5048 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a5082b8c-ec48-4d9c-9204-45580d1bf111-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.214282 5048 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5082b8c-ec48-4d9c-9204-45580d1bf111-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.214293 5048 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5082b8c-ec48-4d9c-9204-45580d1bf111-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.214301 5048 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a5082b8c-ec48-4d9c-9204-45580d1bf111-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.214309 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4vjvp\" (UniqueName: \"kubernetes.io/projected/a5082b8c-ec48-4d9c-9204-45580d1bf111-kube-api-access-4vjvp\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.308055 5048 scope.go:117] "RemoveContainer" containerID="c04c2e8e9183daf195f86aad9ba8ebf7a328a5b1563e869ca69be60640c63b10" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.338619 5048 scope.go:117] "RemoveContainer" containerID="898fd60076ad5d1260695eca7b413de892b7f327bb68edecf91eb32cd012b7da" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.338756 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.351220 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.359224 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Dec 13 06:51:55 crc kubenswrapper[5048]: E1213 06:51:55.359583 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5082b8c-ec48-4d9c-9204-45580d1bf111" containerName="ceilometer-notification-agent" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.359600 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5082b8c-ec48-4d9c-9204-45580d1bf111" 
containerName="ceilometer-notification-agent" Dec 13 06:51:55 crc kubenswrapper[5048]: E1213 06:51:55.359612 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5082b8c-ec48-4d9c-9204-45580d1bf111" containerName="sg-core" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.359618 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5082b8c-ec48-4d9c-9204-45580d1bf111" containerName="sg-core" Dec 13 06:51:55 crc kubenswrapper[5048]: E1213 06:51:55.359639 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5082b8c-ec48-4d9c-9204-45580d1bf111" containerName="ceilometer-central-agent" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.359648 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5082b8c-ec48-4d9c-9204-45580d1bf111" containerName="ceilometer-central-agent" Dec 13 06:51:55 crc kubenswrapper[5048]: E1213 06:51:55.359677 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5082b8c-ec48-4d9c-9204-45580d1bf111" containerName="proxy-httpd" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.359682 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5082b8c-ec48-4d9c-9204-45580d1bf111" containerName="proxy-httpd" Dec 13 06:51:55 crc kubenswrapper[5048]: E1213 06:51:55.359693 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a656ea3-a5ba-4fbc-ba4b-4c220207e764" containerName="nova-scheduler-scheduler" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.359698 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a656ea3-a5ba-4fbc-ba4b-4c220207e764" containerName="nova-scheduler-scheduler" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.359848 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5082b8c-ec48-4d9c-9204-45580d1bf111" containerName="ceilometer-central-agent" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.359899 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5082b8c-ec48-4d9c-9204-45580d1bf111" containerName="proxy-httpd" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.359913 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a656ea3-a5ba-4fbc-ba4b-4c220207e764" containerName="nova-scheduler-scheduler" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.359924 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5082b8c-ec48-4d9c-9204-45580d1bf111" containerName="sg-core" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.359931 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5082b8c-ec48-4d9c-9204-45580d1bf111" containerName="ceilometer-notification-agent" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.360625 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.363379 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.369937 5048 scope.go:117] "RemoveContainer" containerID="4e917bc1a59b3471fe195615dfaee6dbd99cfc10be426d1242794a73b46f4c48" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.376867 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.392012 5048 scope.go:117] "RemoveContainer" containerID="857455881e008335cf61e00b4a6d2dcca52900ec662d5e1b8ac9bbc18fab1720" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.409901 5048 scope.go:117] "RemoveContainer" containerID="c04c2e8e9183daf195f86aad9ba8ebf7a328a5b1563e869ca69be60640c63b10" Dec 13 06:51:55 crc kubenswrapper[5048]: E1213 06:51:55.410482 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c04c2e8e9183daf195f86aad9ba8ebf7a328a5b1563e869ca69be60640c63b10\": container with ID starting with c04c2e8e9183daf195f86aad9ba8ebf7a328a5b1563e869ca69be60640c63b10 not found: ID does not exist" containerID="c04c2e8e9183daf195f86aad9ba8ebf7a328a5b1563e869ca69be60640c63b10" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.410521 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c04c2e8e9183daf195f86aad9ba8ebf7a328a5b1563e869ca69be60640c63b10"} err="failed to get container status \"c04c2e8e9183daf195f86aad9ba8ebf7a328a5b1563e869ca69be60640c63b10\": rpc error: code = NotFound desc = could not find container \"c04c2e8e9183daf195f86aad9ba8ebf7a328a5b1563e869ca69be60640c63b10\": container with ID starting with c04c2e8e9183daf195f86aad9ba8ebf7a328a5b1563e869ca69be60640c63b10 not found: ID does not exist" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.410547 5048 scope.go:117] "RemoveContainer" containerID="898fd60076ad5d1260695eca7b413de892b7f327bb68edecf91eb32cd012b7da" Dec 13 06:51:55 crc kubenswrapper[5048]: E1213 06:51:55.410875 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"898fd60076ad5d1260695eca7b413de892b7f327bb68edecf91eb32cd012b7da\": container with ID starting with 898fd60076ad5d1260695eca7b413de892b7f327bb68edecf91eb32cd012b7da not found: ID does not exist" containerID="898fd60076ad5d1260695eca7b413de892b7f327bb68edecf91eb32cd012b7da" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.410900 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"898fd60076ad5d1260695eca7b413de892b7f327bb68edecf91eb32cd012b7da"} err="failed to get container status \"898fd60076ad5d1260695eca7b413de892b7f327bb68edecf91eb32cd012b7da\": rpc error: code = NotFound desc = could not find container \"898fd60076ad5d1260695eca7b413de892b7f327bb68edecf91eb32cd012b7da\": container with ID starting with 898fd60076ad5d1260695eca7b413de892b7f327bb68edecf91eb32cd012b7da not found: ID does not exist" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.410919 5048 scope.go:117] "RemoveContainer" containerID="4e917bc1a59b3471fe195615dfaee6dbd99cfc10be426d1242794a73b46f4c48" Dec 13 06:51:55 crc kubenswrapper[5048]: E1213 06:51:55.411200 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc 
= could not find container \"4e917bc1a59b3471fe195615dfaee6dbd99cfc10be426d1242794a73b46f4c48\": container with ID starting with 4e917bc1a59b3471fe195615dfaee6dbd99cfc10be426d1242794a73b46f4c48 not found: ID does not exist" containerID="4e917bc1a59b3471fe195615dfaee6dbd99cfc10be426d1242794a73b46f4c48" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.411227 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e917bc1a59b3471fe195615dfaee6dbd99cfc10be426d1242794a73b46f4c48"} err="failed to get container status \"4e917bc1a59b3471fe195615dfaee6dbd99cfc10be426d1242794a73b46f4c48\": rpc error: code = NotFound desc = could not find container \"4e917bc1a59b3471fe195615dfaee6dbd99cfc10be426d1242794a73b46f4c48\": container with ID starting with 4e917bc1a59b3471fe195615dfaee6dbd99cfc10be426d1242794a73b46f4c48 not found: ID does not exist" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.411244 5048 scope.go:117] "RemoveContainer" containerID="857455881e008335cf61e00b4a6d2dcca52900ec662d5e1b8ac9bbc18fab1720" Dec 13 06:51:55 crc kubenswrapper[5048]: E1213 06:51:55.411578 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"857455881e008335cf61e00b4a6d2dcca52900ec662d5e1b8ac9bbc18fab1720\": container with ID starting with 857455881e008335cf61e00b4a6d2dcca52900ec662d5e1b8ac9bbc18fab1720 not found: ID does not exist" containerID="857455881e008335cf61e00b4a6d2dcca52900ec662d5e1b8ac9bbc18fab1720" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.411623 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"857455881e008335cf61e00b4a6d2dcca52900ec662d5e1b8ac9bbc18fab1720"} err="failed to get container status \"857455881e008335cf61e00b4a6d2dcca52900ec662d5e1b8ac9bbc18fab1720\": rpc error: code = NotFound desc = could not find container \"857455881e008335cf61e00b4a6d2dcca52900ec662d5e1b8ac9bbc18fab1720\": container with ID starting with 857455881e008335cf61e00b4a6d2dcca52900ec662d5e1b8ac9bbc18fab1720 not found: ID does not exist" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.513715 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.526010 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xj2xg\" (UniqueName: \"kubernetes.io/projected/affd50c3-a8a2-4925-a7b5-5e6836c0c8cf-kube-api-access-xj2xg\") pod \"nova-scheduler-0\" (UID: \"affd50c3-a8a2-4925-a7b5-5e6836c0c8cf\") " pod="openstack/nova-scheduler-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.526108 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/affd50c3-a8a2-4925-a7b5-5e6836c0c8cf-config-data\") pod \"nova-scheduler-0\" (UID: \"affd50c3-a8a2-4925-a7b5-5e6836c0c8cf\") " pod="openstack/nova-scheduler-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.526769 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/affd50c3-a8a2-4925-a7b5-5e6836c0c8cf-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"affd50c3-a8a2-4925-a7b5-5e6836c0c8cf\") " pod="openstack/nova-scheduler-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.528471 5048 kubelet.go:2431] "SyncLoop REMOVE" 
source="api" pods=["openstack/ceilometer-0"] Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.540368 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.544086 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.548885 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.553287 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.553543 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.553836 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.629519 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5ee8f067-026a-4612-aba4-3cc8976dc02b-run-httpd\") pod \"ceilometer-0\" (UID: \"5ee8f067-026a-4612-aba4-3cc8976dc02b\") " pod="openstack/ceilometer-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.629813 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ee8f067-026a-4612-aba4-3cc8976dc02b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5ee8f067-026a-4612-aba4-3cc8976dc02b\") " pod="openstack/ceilometer-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.629895 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/5ee8f067-026a-4612-aba4-3cc8976dc02b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"5ee8f067-026a-4612-aba4-3cc8976dc02b\") " pod="openstack/ceilometer-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.629994 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ee8f067-026a-4612-aba4-3cc8976dc02b-config-data\") pod \"ceilometer-0\" (UID: \"5ee8f067-026a-4612-aba4-3cc8976dc02b\") " pod="openstack/ceilometer-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.630083 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5ee8f067-026a-4612-aba4-3cc8976dc02b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5ee8f067-026a-4612-aba4-3cc8976dc02b\") " pod="openstack/ceilometer-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.630163 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nlqc4\" (UniqueName: \"kubernetes.io/projected/5ee8f067-026a-4612-aba4-3cc8976dc02b-kube-api-access-nlqc4\") pod \"ceilometer-0\" (UID: \"5ee8f067-026a-4612-aba4-3cc8976dc02b\") " pod="openstack/ceilometer-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.630258 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xj2xg\" (UniqueName: \"kubernetes.io/projected/affd50c3-a8a2-4925-a7b5-5e6836c0c8cf-kube-api-access-xj2xg\") pod 
\"nova-scheduler-0\" (UID: \"affd50c3-a8a2-4925-a7b5-5e6836c0c8cf\") " pod="openstack/nova-scheduler-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.630366 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/affd50c3-a8a2-4925-a7b5-5e6836c0c8cf-config-data\") pod \"nova-scheduler-0\" (UID: \"affd50c3-a8a2-4925-a7b5-5e6836c0c8cf\") " pod="openstack/nova-scheduler-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.630437 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ee8f067-026a-4612-aba4-3cc8976dc02b-scripts\") pod \"ceilometer-0\" (UID: \"5ee8f067-026a-4612-aba4-3cc8976dc02b\") " pod="openstack/ceilometer-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.630611 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5ee8f067-026a-4612-aba4-3cc8976dc02b-log-httpd\") pod \"ceilometer-0\" (UID: \"5ee8f067-026a-4612-aba4-3cc8976dc02b\") " pod="openstack/ceilometer-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.630706 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/affd50c3-a8a2-4925-a7b5-5e6836c0c8cf-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"affd50c3-a8a2-4925-a7b5-5e6836c0c8cf\") " pod="openstack/nova-scheduler-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.634788 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/affd50c3-a8a2-4925-a7b5-5e6836c0c8cf-config-data\") pod \"nova-scheduler-0\" (UID: \"affd50c3-a8a2-4925-a7b5-5e6836c0c8cf\") " pod="openstack/nova-scheduler-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.635405 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/affd50c3-a8a2-4925-a7b5-5e6836c0c8cf-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"affd50c3-a8a2-4925-a7b5-5e6836c0c8cf\") " pod="openstack/nova-scheduler-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.648553 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xj2xg\" (UniqueName: \"kubernetes.io/projected/affd50c3-a8a2-4925-a7b5-5e6836c0c8cf-kube-api-access-xj2xg\") pod \"nova-scheduler-0\" (UID: \"affd50c3-a8a2-4925-a7b5-5e6836c0c8cf\") " pod="openstack/nova-scheduler-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.683869 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.732274 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5ee8f067-026a-4612-aba4-3cc8976dc02b-run-httpd\") pod \"ceilometer-0\" (UID: \"5ee8f067-026a-4612-aba4-3cc8976dc02b\") " pod="openstack/ceilometer-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.732498 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ee8f067-026a-4612-aba4-3cc8976dc02b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5ee8f067-026a-4612-aba4-3cc8976dc02b\") " pod="openstack/ceilometer-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.732584 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/5ee8f067-026a-4612-aba4-3cc8976dc02b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"5ee8f067-026a-4612-aba4-3cc8976dc02b\") " pod="openstack/ceilometer-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.732676 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ee8f067-026a-4612-aba4-3cc8976dc02b-config-data\") pod \"ceilometer-0\" (UID: \"5ee8f067-026a-4612-aba4-3cc8976dc02b\") " pod="openstack/ceilometer-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.732766 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5ee8f067-026a-4612-aba4-3cc8976dc02b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5ee8f067-026a-4612-aba4-3cc8976dc02b\") " pod="openstack/ceilometer-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.732848 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nlqc4\" (UniqueName: \"kubernetes.io/projected/5ee8f067-026a-4612-aba4-3cc8976dc02b-kube-api-access-nlqc4\") pod \"ceilometer-0\" (UID: \"5ee8f067-026a-4612-aba4-3cc8976dc02b\") " pod="openstack/ceilometer-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.732959 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5ee8f067-026a-4612-aba4-3cc8976dc02b-run-httpd\") pod \"ceilometer-0\" (UID: \"5ee8f067-026a-4612-aba4-3cc8976dc02b\") " pod="openstack/ceilometer-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.732965 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ee8f067-026a-4612-aba4-3cc8976dc02b-scripts\") pod \"ceilometer-0\" (UID: \"5ee8f067-026a-4612-aba4-3cc8976dc02b\") " pod="openstack/ceilometer-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.733032 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5ee8f067-026a-4612-aba4-3cc8976dc02b-log-httpd\") pod \"ceilometer-0\" (UID: \"5ee8f067-026a-4612-aba4-3cc8976dc02b\") " pod="openstack/ceilometer-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.733360 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5ee8f067-026a-4612-aba4-3cc8976dc02b-log-httpd\") pod \"ceilometer-0\" (UID: \"5ee8f067-026a-4612-aba4-3cc8976dc02b\") " 
pod="openstack/ceilometer-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.737334 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ee8f067-026a-4612-aba4-3cc8976dc02b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5ee8f067-026a-4612-aba4-3cc8976dc02b\") " pod="openstack/ceilometer-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.737491 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/5ee8f067-026a-4612-aba4-3cc8976dc02b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"5ee8f067-026a-4612-aba4-3cc8976dc02b\") " pod="openstack/ceilometer-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.737864 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ee8f067-026a-4612-aba4-3cc8976dc02b-config-data\") pod \"ceilometer-0\" (UID: \"5ee8f067-026a-4612-aba4-3cc8976dc02b\") " pod="openstack/ceilometer-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.737942 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5ee8f067-026a-4612-aba4-3cc8976dc02b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5ee8f067-026a-4612-aba4-3cc8976dc02b\") " pod="openstack/ceilometer-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.742905 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ee8f067-026a-4612-aba4-3cc8976dc02b-scripts\") pod \"ceilometer-0\" (UID: \"5ee8f067-026a-4612-aba4-3cc8976dc02b\") " pod="openstack/ceilometer-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.750924 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nlqc4\" (UniqueName: \"kubernetes.io/projected/5ee8f067-026a-4612-aba4-3cc8976dc02b-kube-api-access-nlqc4\") pod \"ceilometer-0\" (UID: \"5ee8f067-026a-4612-aba4-3cc8976dc02b\") " pod="openstack/ceilometer-0" Dec 13 06:51:55 crc kubenswrapper[5048]: I1213 06:51:55.866658 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.042534 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.187592 5048 generic.go:334] "Generic (PLEG): container finished" podID="9360f872-f320-4ae7-a11c-5d859e76db22" containerID="e61ed9918f95fb0ab81c8c3351653ef25314136fd9f18eb9b8f2bcdb780f76d7" exitCode=0 Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.187960 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9360f872-f320-4ae7-a11c-5d859e76db22","Type":"ContainerDied","Data":"e61ed9918f95fb0ab81c8c3351653ef25314136fd9f18eb9b8f2bcdb780f76d7"} Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.188032 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9360f872-f320-4ae7-a11c-5d859e76db22","Type":"ContainerDied","Data":"d95ab645d8e41cdb0192433b15ee10c44edcd69496486ece92981107ac7f180f"} Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.188052 5048 scope.go:117] "RemoveContainer" containerID="e61ed9918f95fb0ab81c8c3351653ef25314136fd9f18eb9b8f2bcdb780f76d7" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.188107 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.213572 5048 scope.go:117] "RemoveContainer" containerID="45ea782cfefb7ee828089f889ada280c0b1071d35bf6cca6a18c9d6e2b590cdb" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.220609 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 13 06:51:56 crc kubenswrapper[5048]: W1213 06:51:56.224622 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaffd50c3_a8a2_4925_a7b5_5e6836c0c8cf.slice/crio-c5aa11a9598e78f909b47a0fa54418d9c4dd084c09a6deccdc4c4f3b51149051 WatchSource:0}: Error finding container c5aa11a9598e78f909b47a0fa54418d9c4dd084c09a6deccdc4c4f3b51149051: Status 404 returned error can't find the container with id c5aa11a9598e78f909b47a0fa54418d9c4dd084c09a6deccdc4c4f3b51149051 Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.238764 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mdgbh\" (UniqueName: \"kubernetes.io/projected/9360f872-f320-4ae7-a11c-5d859e76db22-kube-api-access-mdgbh\") pod \"9360f872-f320-4ae7-a11c-5d859e76db22\" (UID: \"9360f872-f320-4ae7-a11c-5d859e76db22\") " Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.238824 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9360f872-f320-4ae7-a11c-5d859e76db22-combined-ca-bundle\") pod \"9360f872-f320-4ae7-a11c-5d859e76db22\" (UID: \"9360f872-f320-4ae7-a11c-5d859e76db22\") " Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.238894 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9360f872-f320-4ae7-a11c-5d859e76db22-config-data\") pod \"9360f872-f320-4ae7-a11c-5d859e76db22\" (UID: \"9360f872-f320-4ae7-a11c-5d859e76db22\") " Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.238981 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9360f872-f320-4ae7-a11c-5d859e76db22-logs\") pod \"9360f872-f320-4ae7-a11c-5d859e76db22\" (UID: \"9360f872-f320-4ae7-a11c-5d859e76db22\") " Dec 13 06:51:56 crc 
kubenswrapper[5048]: I1213 06:51:56.239881 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9360f872-f320-4ae7-a11c-5d859e76db22-logs" (OuterVolumeSpecName: "logs") pod "9360f872-f320-4ae7-a11c-5d859e76db22" (UID: "9360f872-f320-4ae7-a11c-5d859e76db22"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.243302 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9360f872-f320-4ae7-a11c-5d859e76db22-kube-api-access-mdgbh" (OuterVolumeSpecName: "kube-api-access-mdgbh") pod "9360f872-f320-4ae7-a11c-5d859e76db22" (UID: "9360f872-f320-4ae7-a11c-5d859e76db22"). InnerVolumeSpecName "kube-api-access-mdgbh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.270547 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9360f872-f320-4ae7-a11c-5d859e76db22-config-data" (OuterVolumeSpecName: "config-data") pod "9360f872-f320-4ae7-a11c-5d859e76db22" (UID: "9360f872-f320-4ae7-a11c-5d859e76db22"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.272083 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9360f872-f320-4ae7-a11c-5d859e76db22-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9360f872-f320-4ae7-a11c-5d859e76db22" (UID: "9360f872-f320-4ae7-a11c-5d859e76db22"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.340771 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mdgbh\" (UniqueName: \"kubernetes.io/projected/9360f872-f320-4ae7-a11c-5d859e76db22-kube-api-access-mdgbh\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.340811 5048 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9360f872-f320-4ae7-a11c-5d859e76db22-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.340829 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9360f872-f320-4ae7-a11c-5d859e76db22-config-data\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.340849 5048 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9360f872-f320-4ae7-a11c-5d859e76db22-logs\") on node \"crc\" DevicePath \"\"" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.356300 5048 scope.go:117] "RemoveContainer" containerID="e61ed9918f95fb0ab81c8c3351653ef25314136fd9f18eb9b8f2bcdb780f76d7" Dec 13 06:51:56 crc kubenswrapper[5048]: E1213 06:51:56.356807 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e61ed9918f95fb0ab81c8c3351653ef25314136fd9f18eb9b8f2bcdb780f76d7\": container with ID starting with e61ed9918f95fb0ab81c8c3351653ef25314136fd9f18eb9b8f2bcdb780f76d7 not found: ID does not exist" containerID="e61ed9918f95fb0ab81c8c3351653ef25314136fd9f18eb9b8f2bcdb780f76d7" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.356862 5048 pod_container_deletor.go:53] "DeleteContainer 
returned error" containerID={"Type":"cri-o","ID":"e61ed9918f95fb0ab81c8c3351653ef25314136fd9f18eb9b8f2bcdb780f76d7"} err="failed to get container status \"e61ed9918f95fb0ab81c8c3351653ef25314136fd9f18eb9b8f2bcdb780f76d7\": rpc error: code = NotFound desc = could not find container \"e61ed9918f95fb0ab81c8c3351653ef25314136fd9f18eb9b8f2bcdb780f76d7\": container with ID starting with e61ed9918f95fb0ab81c8c3351653ef25314136fd9f18eb9b8f2bcdb780f76d7 not found: ID does not exist" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.356904 5048 scope.go:117] "RemoveContainer" containerID="45ea782cfefb7ee828089f889ada280c0b1071d35bf6cca6a18c9d6e2b590cdb" Dec 13 06:51:56 crc kubenswrapper[5048]: E1213 06:51:56.357402 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"45ea782cfefb7ee828089f889ada280c0b1071d35bf6cca6a18c9d6e2b590cdb\": container with ID starting with 45ea782cfefb7ee828089f889ada280c0b1071d35bf6cca6a18c9d6e2b590cdb not found: ID does not exist" containerID="45ea782cfefb7ee828089f889ada280c0b1071d35bf6cca6a18c9d6e2b590cdb" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.357487 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45ea782cfefb7ee828089f889ada280c0b1071d35bf6cca6a18c9d6e2b590cdb"} err="failed to get container status \"45ea782cfefb7ee828089f889ada280c0b1071d35bf6cca6a18c9d6e2b590cdb\": rpc error: code = NotFound desc = could not find container \"45ea782cfefb7ee828089f889ada280c0b1071d35bf6cca6a18c9d6e2b590cdb\": container with ID starting with 45ea782cfefb7ee828089f889ada280c0b1071d35bf6cca6a18c9d6e2b590cdb not found: ID does not exist" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.430276 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.523960 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.538904 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.569069 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Dec 13 06:51:56 crc kubenswrapper[5048]: E1213 06:51:56.570070 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9360f872-f320-4ae7-a11c-5d859e76db22" containerName="nova-api-log" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.570117 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="9360f872-f320-4ae7-a11c-5d859e76db22" containerName="nova-api-log" Dec 13 06:51:56 crc kubenswrapper[5048]: E1213 06:51:56.570165 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9360f872-f320-4ae7-a11c-5d859e76db22" containerName="nova-api-api" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.570176 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="9360f872-f320-4ae7-a11c-5d859e76db22" containerName="nova-api-api" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.570778 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="9360f872-f320-4ae7-a11c-5d859e76db22" containerName="nova-api-log" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.570806 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="9360f872-f320-4ae7-a11c-5d859e76db22" containerName="nova-api-api" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.573031 5048 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.588061 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.593994 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a656ea3-a5ba-4fbc-ba4b-4c220207e764" path="/var/lib/kubelet/pods/3a656ea3-a5ba-4fbc-ba4b-4c220207e764/volumes" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.594587 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9360f872-f320-4ae7-a11c-5d859e76db22" path="/var/lib/kubelet/pods/9360f872-f320-4ae7-a11c-5d859e76db22/volumes" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.595155 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a5082b8c-ec48-4d9c-9204-45580d1bf111" path="/var/lib/kubelet/pods/a5082b8c-ec48-4d9c-9204-45580d1bf111/volumes" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.597251 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.755422 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2071eed4-1f89-426e-ba3e-d79fad42a378-config-data\") pod \"nova-api-0\" (UID: \"2071eed4-1f89-426e-ba3e-d79fad42a378\") " pod="openstack/nova-api-0" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.755505 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2071eed4-1f89-426e-ba3e-d79fad42a378-logs\") pod \"nova-api-0\" (UID: \"2071eed4-1f89-426e-ba3e-d79fad42a378\") " pod="openstack/nova-api-0" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.755553 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2071eed4-1f89-426e-ba3e-d79fad42a378-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2071eed4-1f89-426e-ba3e-d79fad42a378\") " pod="openstack/nova-api-0" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.755608 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pvkmz\" (UniqueName: \"kubernetes.io/projected/2071eed4-1f89-426e-ba3e-d79fad42a378-kube-api-access-pvkmz\") pod \"nova-api-0\" (UID: \"2071eed4-1f89-426e-ba3e-d79fad42a378\") " pod="openstack/nova-api-0" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.857718 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2071eed4-1f89-426e-ba3e-d79fad42a378-logs\") pod \"nova-api-0\" (UID: \"2071eed4-1f89-426e-ba3e-d79fad42a378\") " pod="openstack/nova-api-0" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.857786 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2071eed4-1f89-426e-ba3e-d79fad42a378-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2071eed4-1f89-426e-ba3e-d79fad42a378\") " pod="openstack/nova-api-0" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.857835 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pvkmz\" (UniqueName: 
\"kubernetes.io/projected/2071eed4-1f89-426e-ba3e-d79fad42a378-kube-api-access-pvkmz\") pod \"nova-api-0\" (UID: \"2071eed4-1f89-426e-ba3e-d79fad42a378\") " pod="openstack/nova-api-0" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.857943 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2071eed4-1f89-426e-ba3e-d79fad42a378-config-data\") pod \"nova-api-0\" (UID: \"2071eed4-1f89-426e-ba3e-d79fad42a378\") " pod="openstack/nova-api-0" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.858460 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2071eed4-1f89-426e-ba3e-d79fad42a378-logs\") pod \"nova-api-0\" (UID: \"2071eed4-1f89-426e-ba3e-d79fad42a378\") " pod="openstack/nova-api-0" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.862544 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2071eed4-1f89-426e-ba3e-d79fad42a378-config-data\") pod \"nova-api-0\" (UID: \"2071eed4-1f89-426e-ba3e-d79fad42a378\") " pod="openstack/nova-api-0" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.863307 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2071eed4-1f89-426e-ba3e-d79fad42a378-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2071eed4-1f89-426e-ba3e-d79fad42a378\") " pod="openstack/nova-api-0" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.878022 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pvkmz\" (UniqueName: \"kubernetes.io/projected/2071eed4-1f89-426e-ba3e-d79fad42a378-kube-api-access-pvkmz\") pod \"nova-api-0\" (UID: \"2071eed4-1f89-426e-ba3e-d79fad42a378\") " pod="openstack/nova-api-0" Dec 13 06:51:56 crc kubenswrapper[5048]: I1213 06:51:56.903141 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 13 06:51:57 crc kubenswrapper[5048]: I1213 06:51:57.203095 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5ee8f067-026a-4612-aba4-3cc8976dc02b","Type":"ContainerStarted","Data":"5678e9a07a9764ffeacf2499dd1b126005d0b9d0ddd2834e7c85b084432753b1"} Dec 13 06:51:57 crc kubenswrapper[5048]: I1213 06:51:57.203518 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5ee8f067-026a-4612-aba4-3cc8976dc02b","Type":"ContainerStarted","Data":"7b8e8833ee81aa9594b417cd8be4adaa1f5468b4c2744e36731e1467fde7d73b"} Dec 13 06:51:57 crc kubenswrapper[5048]: I1213 06:51:57.205233 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"affd50c3-a8a2-4925-a7b5-5e6836c0c8cf","Type":"ContainerStarted","Data":"be559a913b705ae5860ff331bbc9ceda2812f21d11c631de2d2e078ab8e853b9"} Dec 13 06:51:57 crc kubenswrapper[5048]: I1213 06:51:57.205284 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"affd50c3-a8a2-4925-a7b5-5e6836c0c8cf","Type":"ContainerStarted","Data":"c5aa11a9598e78f909b47a0fa54418d9c4dd084c09a6deccdc4c4f3b51149051"} Dec 13 06:51:57 crc kubenswrapper[5048]: I1213 06:51:57.225326 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.225301926 podStartE2EDuration="2.225301926s" podCreationTimestamp="2025-12-13 06:51:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:51:57.220140149 +0000 UTC m=+1351.086734760" watchObservedRunningTime="2025-12-13 06:51:57.225301926 +0000 UTC m=+1351.091896517" Dec 13 06:51:57 crc kubenswrapper[5048]: I1213 06:51:57.366110 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 13 06:51:57 crc kubenswrapper[5048]: W1213 06:51:57.369743 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2071eed4_1f89_426e_ba3e_d79fad42a378.slice/crio-ab55b62f5b0901db2ff2a2247aad4ef70b39e8ddd8d28951e2b5abc7993626f5 WatchSource:0}: Error finding container ab55b62f5b0901db2ff2a2247aad4ef70b39e8ddd8d28951e2b5abc7993626f5: Status 404 returned error can't find the container with id ab55b62f5b0901db2ff2a2247aad4ef70b39e8ddd8d28951e2b5abc7993626f5 Dec 13 06:51:58 crc kubenswrapper[5048]: I1213 06:51:58.227195 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2071eed4-1f89-426e-ba3e-d79fad42a378","Type":"ContainerStarted","Data":"efc2a5fd4eb3b12fcbbdd454e88197cf42c3c37c66d0933b8004c2d090cf981a"} Dec 13 06:51:58 crc kubenswrapper[5048]: I1213 06:51:58.227601 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2071eed4-1f89-426e-ba3e-d79fad42a378","Type":"ContainerStarted","Data":"e3b74ac41a0bfaa5f38e0869bc122ac88e1eb988e75f2c5ad698e73478e3474f"} Dec 13 06:51:58 crc kubenswrapper[5048]: I1213 06:51:58.227617 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2071eed4-1f89-426e-ba3e-d79fad42a378","Type":"ContainerStarted","Data":"ab55b62f5b0901db2ff2a2247aad4ef70b39e8ddd8d28951e2b5abc7993626f5"} Dec 13 06:51:58 crc kubenswrapper[5048]: I1213 06:51:58.230548 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"5ee8f067-026a-4612-aba4-3cc8976dc02b","Type":"ContainerStarted","Data":"12fa0e4a983ca089a6bde9c9d2c0e2bb854e8e7fcd9ae0c316f434b4680584ae"} Dec 13 06:51:58 crc kubenswrapper[5048]: I1213 06:51:58.253021 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.253000068 podStartE2EDuration="2.253000068s" podCreationTimestamp="2025-12-13 06:51:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:51:58.250581125 +0000 UTC m=+1352.117175726" watchObservedRunningTime="2025-12-13 06:51:58.253000068 +0000 UTC m=+1352.119594649" Dec 13 06:51:59 crc kubenswrapper[5048]: I1213 06:51:59.240844 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5ee8f067-026a-4612-aba4-3cc8976dc02b","Type":"ContainerStarted","Data":"7e2a50c3d819afd8d20d882e8c105bf2b9adb54c96fd8aebe0821b3d130c3c1f"} Dec 13 06:51:59 crc kubenswrapper[5048]: I1213 06:51:59.470874 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Dec 13 06:52:00 crc kubenswrapper[5048]: I1213 06:52:00.264517 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5ee8f067-026a-4612-aba4-3cc8976dc02b","Type":"ContainerStarted","Data":"c849f85d50b7c72d7bf8b7b7f0aa851fb2f4be8ba98763ee2b645aff7d4ba0eb"} Dec 13 06:52:00 crc kubenswrapper[5048]: I1213 06:52:00.265114 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 13 06:52:00 crc kubenswrapper[5048]: I1213 06:52:00.298027 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.765233786 podStartE2EDuration="5.298007338s" podCreationTimestamp="2025-12-13 06:51:55 +0000 UTC" firstStartedPulling="2025-12-13 06:51:56.431892063 +0000 UTC m=+1350.298486644" lastFinishedPulling="2025-12-13 06:51:59.964665615 +0000 UTC m=+1353.831260196" observedRunningTime="2025-12-13 06:52:00.290136038 +0000 UTC m=+1354.156730619" watchObservedRunningTime="2025-12-13 06:52:00.298007338 +0000 UTC m=+1354.164601919" Dec 13 06:52:00 crc kubenswrapper[5048]: I1213 06:52:00.554608 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Dec 13 06:52:00 crc kubenswrapper[5048]: I1213 06:52:00.684325 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Dec 13 06:52:05 crc kubenswrapper[5048]: I1213 06:52:05.684991 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Dec 13 06:52:05 crc kubenswrapper[5048]: I1213 06:52:05.710915 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Dec 13 06:52:06 crc kubenswrapper[5048]: I1213 06:52:06.356402 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Dec 13 06:52:06 crc kubenswrapper[5048]: I1213 06:52:06.904400 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 13 06:52:06 crc kubenswrapper[5048]: I1213 06:52:06.904487 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 13 06:52:07 crc kubenswrapper[5048]: I1213 06:52:07.987938 5048 prober.go:107] "Probe failed" probeType="Startup" 
pod="openstack/nova-api-0" podUID="2071eed4-1f89-426e-ba3e-d79fad42a378" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.193:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 13 06:52:07 crc kubenswrapper[5048]: I1213 06:52:07.987924 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="2071eed4-1f89-426e-ba3e-d79fad42a378" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.193:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.326703 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.333524 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.374386 5048 generic.go:334] "Generic (PLEG): container finished" podID="233040b0-ce2e-4fe7-8863-9093c75e3e7a" containerID="16c8243e3ab3c804f6f643c78f667d6b578c6e92c17a2161214d8e3e51f3a441" exitCode=137 Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.374452 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.374472 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"233040b0-ce2e-4fe7-8863-9093c75e3e7a","Type":"ContainerDied","Data":"16c8243e3ab3c804f6f643c78f667d6b578c6e92c17a2161214d8e3e51f3a441"} Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.374962 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"233040b0-ce2e-4fe7-8863-9093c75e3e7a","Type":"ContainerDied","Data":"1df6776915fd8077262b7ad37482527c89e9e5836babaf83956f7a2a648059cf"} Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.374988 5048 scope.go:117] "RemoveContainer" containerID="16c8243e3ab3c804f6f643c78f667d6b578c6e92c17a2161214d8e3e51f3a441" Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.377331 5048 generic.go:334] "Generic (PLEG): container finished" podID="5f1dc8fe-cba2-4a26-9d79-42f3be73edb3" containerID="4dca945788a50969ff033b9d4931cb3df06cbcf96f0246ec4db9dcb9b3bcf8a1" exitCode=137 Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.377373 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"5f1dc8fe-cba2-4a26-9d79-42f3be73edb3","Type":"ContainerDied","Data":"4dca945788a50969ff033b9d4931cb3df06cbcf96f0246ec4db9dcb9b3bcf8a1"} Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.377400 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"5f1dc8fe-cba2-4a26-9d79-42f3be73edb3","Type":"ContainerDied","Data":"ff7f49b667160ef35f0628eda22a212e48446bcfe5732e262a885d0668e7e20d"} Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.377396 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.396333 5048 scope.go:117] "RemoveContainer" containerID="34f65462a5d92b583686ebaf6f7ba9685249faf91354fabb593541bdaf0bfff1" Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.416728 5048 scope.go:117] "RemoveContainer" containerID="16c8243e3ab3c804f6f643c78f667d6b578c6e92c17a2161214d8e3e51f3a441" Dec 13 06:52:13 crc kubenswrapper[5048]: E1213 06:52:13.417116 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"16c8243e3ab3c804f6f643c78f667d6b578c6e92c17a2161214d8e3e51f3a441\": container with ID starting with 16c8243e3ab3c804f6f643c78f667d6b578c6e92c17a2161214d8e3e51f3a441 not found: ID does not exist" containerID="16c8243e3ab3c804f6f643c78f667d6b578c6e92c17a2161214d8e3e51f3a441" Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.417152 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"16c8243e3ab3c804f6f643c78f667d6b578c6e92c17a2161214d8e3e51f3a441"} err="failed to get container status \"16c8243e3ab3c804f6f643c78f667d6b578c6e92c17a2161214d8e3e51f3a441\": rpc error: code = NotFound desc = could not find container \"16c8243e3ab3c804f6f643c78f667d6b578c6e92c17a2161214d8e3e51f3a441\": container with ID starting with 16c8243e3ab3c804f6f643c78f667d6b578c6e92c17a2161214d8e3e51f3a441 not found: ID does not exist" Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.417177 5048 scope.go:117] "RemoveContainer" containerID="34f65462a5d92b583686ebaf6f7ba9685249faf91354fabb593541bdaf0bfff1" Dec 13 06:52:13 crc kubenswrapper[5048]: E1213 06:52:13.417495 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"34f65462a5d92b583686ebaf6f7ba9685249faf91354fabb593541bdaf0bfff1\": container with ID starting with 34f65462a5d92b583686ebaf6f7ba9685249faf91354fabb593541bdaf0bfff1 not found: ID does not exist" containerID="34f65462a5d92b583686ebaf6f7ba9685249faf91354fabb593541bdaf0bfff1" Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.417548 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"34f65462a5d92b583686ebaf6f7ba9685249faf91354fabb593541bdaf0bfff1"} err="failed to get container status \"34f65462a5d92b583686ebaf6f7ba9685249faf91354fabb593541bdaf0bfff1\": rpc error: code = NotFound desc = could not find container \"34f65462a5d92b583686ebaf6f7ba9685249faf91354fabb593541bdaf0bfff1\": container with ID starting with 34f65462a5d92b583686ebaf6f7ba9685249faf91354fabb593541bdaf0bfff1 not found: ID does not exist" Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.417583 5048 scope.go:117] "RemoveContainer" containerID="4dca945788a50969ff033b9d4931cb3df06cbcf96f0246ec4db9dcb9b3bcf8a1" Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.438512 5048 scope.go:117] "RemoveContainer" containerID="4dca945788a50969ff033b9d4931cb3df06cbcf96f0246ec4db9dcb9b3bcf8a1" Dec 13 06:52:13 crc kubenswrapper[5048]: E1213 06:52:13.438927 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4dca945788a50969ff033b9d4931cb3df06cbcf96f0246ec4db9dcb9b3bcf8a1\": container with ID starting with 4dca945788a50969ff033b9d4931cb3df06cbcf96f0246ec4db9dcb9b3bcf8a1 not found: ID does not exist" containerID="4dca945788a50969ff033b9d4931cb3df06cbcf96f0246ec4db9dcb9b3bcf8a1" Dec 13 
06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.438954 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4dca945788a50969ff033b9d4931cb3df06cbcf96f0246ec4db9dcb9b3bcf8a1"} err="failed to get container status \"4dca945788a50969ff033b9d4931cb3df06cbcf96f0246ec4db9dcb9b3bcf8a1\": rpc error: code = NotFound desc = could not find container \"4dca945788a50969ff033b9d4931cb3df06cbcf96f0246ec4db9dcb9b3bcf8a1\": container with ID starting with 4dca945788a50969ff033b9d4931cb3df06cbcf96f0246ec4db9dcb9b3bcf8a1 not found: ID does not exist" Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.505165 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f1dc8fe-cba2-4a26-9d79-42f3be73edb3-config-data\") pod \"5f1dc8fe-cba2-4a26-9d79-42f3be73edb3\" (UID: \"5f1dc8fe-cba2-4a26-9d79-42f3be73edb3\") " Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.505220 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5hq49\" (UniqueName: \"kubernetes.io/projected/5f1dc8fe-cba2-4a26-9d79-42f3be73edb3-kube-api-access-5hq49\") pod \"5f1dc8fe-cba2-4a26-9d79-42f3be73edb3\" (UID: \"5f1dc8fe-cba2-4a26-9d79-42f3be73edb3\") " Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.505270 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/233040b0-ce2e-4fe7-8863-9093c75e3e7a-logs\") pod \"233040b0-ce2e-4fe7-8863-9093c75e3e7a\" (UID: \"233040b0-ce2e-4fe7-8863-9093c75e3e7a\") " Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.505486 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/233040b0-ce2e-4fe7-8863-9093c75e3e7a-combined-ca-bundle\") pod \"233040b0-ce2e-4fe7-8863-9093c75e3e7a\" (UID: \"233040b0-ce2e-4fe7-8863-9093c75e3e7a\") " Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.505536 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/233040b0-ce2e-4fe7-8863-9093c75e3e7a-config-data\") pod \"233040b0-ce2e-4fe7-8863-9093c75e3e7a\" (UID: \"233040b0-ce2e-4fe7-8863-9093c75e3e7a\") " Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.505572 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rcdll\" (UniqueName: \"kubernetes.io/projected/233040b0-ce2e-4fe7-8863-9093c75e3e7a-kube-api-access-rcdll\") pod \"233040b0-ce2e-4fe7-8863-9093c75e3e7a\" (UID: \"233040b0-ce2e-4fe7-8863-9093c75e3e7a\") " Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.505591 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f1dc8fe-cba2-4a26-9d79-42f3be73edb3-combined-ca-bundle\") pod \"5f1dc8fe-cba2-4a26-9d79-42f3be73edb3\" (UID: \"5f1dc8fe-cba2-4a26-9d79-42f3be73edb3\") " Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.506539 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/233040b0-ce2e-4fe7-8863-9093c75e3e7a-logs" (OuterVolumeSpecName: "logs") pod "233040b0-ce2e-4fe7-8863-9093c75e3e7a" (UID: "233040b0-ce2e-4fe7-8863-9093c75e3e7a"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.507679 5048 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/233040b0-ce2e-4fe7-8863-9093c75e3e7a-logs\") on node \"crc\" DevicePath \"\"" Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.512227 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f1dc8fe-cba2-4a26-9d79-42f3be73edb3-kube-api-access-5hq49" (OuterVolumeSpecName: "kube-api-access-5hq49") pod "5f1dc8fe-cba2-4a26-9d79-42f3be73edb3" (UID: "5f1dc8fe-cba2-4a26-9d79-42f3be73edb3"). InnerVolumeSpecName "kube-api-access-5hq49". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.519630 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/233040b0-ce2e-4fe7-8863-9093c75e3e7a-kube-api-access-rcdll" (OuterVolumeSpecName: "kube-api-access-rcdll") pod "233040b0-ce2e-4fe7-8863-9093c75e3e7a" (UID: "233040b0-ce2e-4fe7-8863-9093c75e3e7a"). InnerVolumeSpecName "kube-api-access-rcdll". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.532716 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f1dc8fe-cba2-4a26-9d79-42f3be73edb3-config-data" (OuterVolumeSpecName: "config-data") pod "5f1dc8fe-cba2-4a26-9d79-42f3be73edb3" (UID: "5f1dc8fe-cba2-4a26-9d79-42f3be73edb3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.539931 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/233040b0-ce2e-4fe7-8863-9093c75e3e7a-config-data" (OuterVolumeSpecName: "config-data") pod "233040b0-ce2e-4fe7-8863-9093c75e3e7a" (UID: "233040b0-ce2e-4fe7-8863-9093c75e3e7a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.541580 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f1dc8fe-cba2-4a26-9d79-42f3be73edb3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5f1dc8fe-cba2-4a26-9d79-42f3be73edb3" (UID: "5f1dc8fe-cba2-4a26-9d79-42f3be73edb3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.543228 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/233040b0-ce2e-4fe7-8863-9093c75e3e7a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "233040b0-ce2e-4fe7-8863-9093c75e3e7a" (UID: "233040b0-ce2e-4fe7-8863-9093c75e3e7a"). InnerVolumeSpecName "combined-ca-bundle". 
Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.609350 5048 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/233040b0-ce2e-4fe7-8863-9093c75e3e7a-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.609390 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/233040b0-ce2e-4fe7-8863-9093c75e3e7a-config-data\") on node \"crc\" DevicePath \"\""
Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.609399 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rcdll\" (UniqueName: \"kubernetes.io/projected/233040b0-ce2e-4fe7-8863-9093c75e3e7a-kube-api-access-rcdll\") on node \"crc\" DevicePath \"\""
Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.609412 5048 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f1dc8fe-cba2-4a26-9d79-42f3be73edb3-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.609421 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f1dc8fe-cba2-4a26-9d79-42f3be73edb3-config-data\") on node \"crc\" DevicePath \"\""
Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.609429 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5hq49\" (UniqueName: \"kubernetes.io/projected/5f1dc8fe-cba2-4a26-9d79-42f3be73edb3-kube-api-access-5hq49\") on node \"crc\" DevicePath \"\""
Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.715811 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.726809 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.739646 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.752974 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.768693 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Dec 13 06:52:13 crc kubenswrapper[5048]: E1213 06:52:13.769106 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f1dc8fe-cba2-4a26-9d79-42f3be73edb3" containerName="nova-cell1-novncproxy-novncproxy"
Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.769130 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f1dc8fe-cba2-4a26-9d79-42f3be73edb3" containerName="nova-cell1-novncproxy-novncproxy"
Dec 13 06:52:13 crc kubenswrapper[5048]: E1213 06:52:13.769188 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="233040b0-ce2e-4fe7-8863-9093c75e3e7a" containerName="nova-metadata-log"
Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.769198 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="233040b0-ce2e-4fe7-8863-9093c75e3e7a" containerName="nova-metadata-log"
Dec 13 06:52:13 crc kubenswrapper[5048]: E1213 06:52:13.769214 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="233040b0-ce2e-4fe7-8863-9093c75e3e7a" containerName="nova-metadata-metadata"
Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.769222 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="233040b0-ce2e-4fe7-8863-9093c75e3e7a" containerName="nova-metadata-metadata"
Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.769464 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="233040b0-ce2e-4fe7-8863-9093c75e3e7a" containerName="nova-metadata-log"
Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.769507 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="233040b0-ce2e-4fe7-8863-9093c75e3e7a" containerName="nova-metadata-metadata"
Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.769523 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f1dc8fe-cba2-4a26-9d79-42f3be73edb3" containerName="nova-cell1-novncproxy-novncproxy"
Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.770130 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.772690 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc"
Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.773209 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt"
Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.773366 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data"
Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.780623 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.789464 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.795088 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.795164 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.795754 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.807602 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.917591 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/074ecf6d-bc26-46eb-8cf0-ffcf4612a773-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"074ecf6d-bc26-46eb-8cf0-ffcf4612a773\") " pod="openstack/nova-metadata-0"
Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.917701 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/074ecf6d-bc26-46eb-8cf0-ffcf4612a773-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"074ecf6d-bc26-46eb-8cf0-ffcf4612a773\") " pod="openstack/nova-metadata-0"
Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.917769 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6646f183-ffd9-4870-a202-85003939acd6-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"6646f183-ffd9-4870-a202-85003939acd6\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.917845 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t6d8g\" (UniqueName: \"kubernetes.io/projected/6646f183-ffd9-4870-a202-85003939acd6-kube-api-access-t6d8g\") pod \"nova-cell1-novncproxy-0\" (UID: \"6646f183-ffd9-4870-a202-85003939acd6\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.917879 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/074ecf6d-bc26-46eb-8cf0-ffcf4612a773-config-data\") pod \"nova-metadata-0\" (UID: \"074ecf6d-bc26-46eb-8cf0-ffcf4612a773\") " pod="openstack/nova-metadata-0"
Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.917907 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/074ecf6d-bc26-46eb-8cf0-ffcf4612a773-logs\") pod \"nova-metadata-0\" (UID: \"074ecf6d-bc26-46eb-8cf0-ffcf4612a773\") " pod="openstack/nova-metadata-0"
Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.917931 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/6646f183-ffd9-4870-a202-85003939acd6-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"6646f183-ffd9-4870-a202-85003939acd6\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.917989 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/6646f183-ffd9-4870-a202-85003939acd6-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"6646f183-ffd9-4870-a202-85003939acd6\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.918013 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hmgwv\" (UniqueName: \"kubernetes.io/projected/074ecf6d-bc26-46eb-8cf0-ffcf4612a773-kube-api-access-hmgwv\") pod \"nova-metadata-0\" (UID: \"074ecf6d-bc26-46eb-8cf0-ffcf4612a773\") " pod="openstack/nova-metadata-0"
Dec 13 06:52:13 crc kubenswrapper[5048]: I1213 06:52:13.918055 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6646f183-ffd9-4870-a202-85003939acd6-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"6646f183-ffd9-4870-a202-85003939acd6\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 13 06:52:14 crc kubenswrapper[5048]: I1213 06:52:14.019392 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6646f183-ffd9-4870-a202-85003939acd6-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"6646f183-ffd9-4870-a202-85003939acd6\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 13 06:52:14 crc kubenswrapper[5048]: I1213 06:52:14.019515 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t6d8g\" (UniqueName: \"kubernetes.io/projected/6646f183-ffd9-4870-a202-85003939acd6-kube-api-access-t6d8g\") pod \"nova-cell1-novncproxy-0\" (UID: \"6646f183-ffd9-4870-a202-85003939acd6\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 13 06:52:14 crc kubenswrapper[5048]: I1213 06:52:14.019549 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/074ecf6d-bc26-46eb-8cf0-ffcf4612a773-config-data\") pod \"nova-metadata-0\" (UID: \"074ecf6d-bc26-46eb-8cf0-ffcf4612a773\") " pod="openstack/nova-metadata-0"
Dec 13 06:52:14 crc kubenswrapper[5048]: I1213 06:52:14.019575 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/074ecf6d-bc26-46eb-8cf0-ffcf4612a773-logs\") pod \"nova-metadata-0\" (UID: \"074ecf6d-bc26-46eb-8cf0-ffcf4612a773\") " pod="openstack/nova-metadata-0"
Dec 13 06:52:14 crc kubenswrapper[5048]: I1213 06:52:14.019598 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/6646f183-ffd9-4870-a202-85003939acd6-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"6646f183-ffd9-4870-a202-85003939acd6\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 13 06:52:14 crc kubenswrapper[5048]: I1213 06:52:14.019623 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/6646f183-ffd9-4870-a202-85003939acd6-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"6646f183-ffd9-4870-a202-85003939acd6\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 13 06:52:14 crc kubenswrapper[5048]: I1213 06:52:14.019642 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hmgwv\" (UniqueName: \"kubernetes.io/projected/074ecf6d-bc26-46eb-8cf0-ffcf4612a773-kube-api-access-hmgwv\") pod \"nova-metadata-0\" (UID: \"074ecf6d-bc26-46eb-8cf0-ffcf4612a773\") " pod="openstack/nova-metadata-0"
Dec 13 06:52:14 crc kubenswrapper[5048]: I1213 06:52:14.019662 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6646f183-ffd9-4870-a202-85003939acd6-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"6646f183-ffd9-4870-a202-85003939acd6\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 13 06:52:14 crc kubenswrapper[5048]: I1213 06:52:14.019686 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/074ecf6d-bc26-46eb-8cf0-ffcf4612a773-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"074ecf6d-bc26-46eb-8cf0-ffcf4612a773\") " pod="openstack/nova-metadata-0"
Dec 13 06:52:14 crc kubenswrapper[5048]: I1213 06:52:14.019707 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/074ecf6d-bc26-46eb-8cf0-ffcf4612a773-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"074ecf6d-bc26-46eb-8cf0-ffcf4612a773\") " pod="openstack/nova-metadata-0"
Dec 13 06:52:14 crc kubenswrapper[5048]: I1213 06:52:14.020143 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/074ecf6d-bc26-46eb-8cf0-ffcf4612a773-logs\") pod \"nova-metadata-0\" (UID: \"074ecf6d-bc26-46eb-8cf0-ffcf4612a773\") " pod="openstack/nova-metadata-0"
Dec 13 06:52:14 crc kubenswrapper[5048]: I1213 06:52:14.023654 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6646f183-ffd9-4870-a202-85003939acd6-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"6646f183-ffd9-4870-a202-85003939acd6\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 13 06:52:14 crc kubenswrapper[5048]: I1213 06:52:14.023777 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/074ecf6d-bc26-46eb-8cf0-ffcf4612a773-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"074ecf6d-bc26-46eb-8cf0-ffcf4612a773\") " pod="openstack/nova-metadata-0"
Dec 13 06:52:14 crc kubenswrapper[5048]: I1213 06:52:14.025336 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/6646f183-ffd9-4870-a202-85003939acd6-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"6646f183-ffd9-4870-a202-85003939acd6\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 13 06:52:14 crc kubenswrapper[5048]: I1213 06:52:14.025353 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6646f183-ffd9-4870-a202-85003939acd6-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"6646f183-ffd9-4870-a202-85003939acd6\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 13 06:52:14 crc kubenswrapper[5048]: I1213 06:52:14.026059 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/6646f183-ffd9-4870-a202-85003939acd6-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"6646f183-ffd9-4870-a202-85003939acd6\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 13 06:52:14 crc kubenswrapper[5048]: I1213 06:52:14.027224 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/074ecf6d-bc26-46eb-8cf0-ffcf4612a773-config-data\") pod \"nova-metadata-0\" (UID: \"074ecf6d-bc26-46eb-8cf0-ffcf4612a773\") " pod="openstack/nova-metadata-0"
Dec 13 06:52:14 crc kubenswrapper[5048]: I1213 06:52:14.034733 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/074ecf6d-bc26-46eb-8cf0-ffcf4612a773-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"074ecf6d-bc26-46eb-8cf0-ffcf4612a773\") " pod="openstack/nova-metadata-0"
Dec 13 06:52:14 crc kubenswrapper[5048]: I1213 06:52:14.038665 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hmgwv\" (UniqueName: \"kubernetes.io/projected/074ecf6d-bc26-46eb-8cf0-ffcf4612a773-kube-api-access-hmgwv\") pod \"nova-metadata-0\" (UID: \"074ecf6d-bc26-46eb-8cf0-ffcf4612a773\") " pod="openstack/nova-metadata-0"
Dec 13 06:52:14 crc kubenswrapper[5048]: I1213 06:52:14.039041 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t6d8g\" (UniqueName: \"kubernetes.io/projected/6646f183-ffd9-4870-a202-85003939acd6-kube-api-access-t6d8g\") pod \"nova-cell1-novncproxy-0\" (UID: \"6646f183-ffd9-4870-a202-85003939acd6\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 13 06:52:14 crc kubenswrapper[5048]: I1213 06:52:14.088974 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Dec 13 06:52:14 crc kubenswrapper[5048]: I1213 06:52:14.112605 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Dec 13 06:52:14 crc kubenswrapper[5048]: I1213 06:52:14.544650 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Dec 13 06:52:14 crc kubenswrapper[5048]: W1213 06:52:14.546709 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6646f183_ffd9_4870_a202_85003939acd6.slice/crio-ba55f8afe53f7fec210132fef0c22e29c842ba0f1504634b5f912f04dd9da7c5 WatchSource:0}: Error finding container ba55f8afe53f7fec210132fef0c22e29c842ba0f1504634b5f912f04dd9da7c5: Status 404 returned error can't find the container with id ba55f8afe53f7fec210132fef0c22e29c842ba0f1504634b5f912f04dd9da7c5
Dec 13 06:52:14 crc kubenswrapper[5048]: I1213 06:52:14.577920 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="233040b0-ce2e-4fe7-8863-9093c75e3e7a" path="/var/lib/kubelet/pods/233040b0-ce2e-4fe7-8863-9093c75e3e7a/volumes"
Dec 13 06:52:14 crc kubenswrapper[5048]: I1213 06:52:14.578791 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f1dc8fe-cba2-4a26-9d79-42f3be73edb3" path="/var/lib/kubelet/pods/5f1dc8fe-cba2-4a26-9d79-42f3be73edb3/volumes"
Dec 13 06:52:14 crc kubenswrapper[5048]: I1213 06:52:14.659957 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Dec 13 06:52:14 crc kubenswrapper[5048]: W1213 06:52:14.664277 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod074ecf6d_bc26_46eb_8cf0_ffcf4612a773.slice/crio-f88c9ea72f56a0b932492af1f5da310eed82d55054a320cf63dd4c65262674b5 WatchSource:0}: Error finding container f88c9ea72f56a0b932492af1f5da310eed82d55054a320cf63dd4c65262674b5: Status 404 returned error can't find the container with id f88c9ea72f56a0b932492af1f5da310eed82d55054a320cf63dd4c65262674b5
Dec 13 06:52:15 crc kubenswrapper[5048]: I1213 06:52:15.411759 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"6646f183-ffd9-4870-a202-85003939acd6","Type":"ContainerStarted","Data":"ba55f8afe53f7fec210132fef0c22e29c842ba0f1504634b5f912f04dd9da7c5"}
Dec 13 06:52:15 crc kubenswrapper[5048]: I1213 06:52:15.413294 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"074ecf6d-bc26-46eb-8cf0-ffcf4612a773","Type":"ContainerStarted","Data":"f88c9ea72f56a0b932492af1f5da310eed82d55054a320cf63dd4c65262674b5"}
Dec 13 06:52:16 crc kubenswrapper[5048]: I1213 06:52:16.216242 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 13 06:52:16 crc kubenswrapper[5048]: I1213 06:52:16.216627 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 13 06:52:16 crc kubenswrapper[5048]: I1213 06:52:16.424900 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"6646f183-ffd9-4870-a202-85003939acd6","Type":"ContainerStarted","Data":"62b5595b3a18522f05ef3f010f98f8ba61d506308fb6f7b107a0fc80e8ce15f9"}
Dec 13 06:52:16 crc kubenswrapper[5048]: I1213 06:52:16.426859 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"074ecf6d-bc26-46eb-8cf0-ffcf4612a773","Type":"ContainerStarted","Data":"f0a995fa6063730169e9bc8f6bd76c3aa3fdfc5b0641b5488c561c19736e2f87"}
Dec 13 06:52:16 crc kubenswrapper[5048]: I1213 06:52:16.426896 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"074ecf6d-bc26-46eb-8cf0-ffcf4612a773","Type":"ContainerStarted","Data":"e5a1dd28b1e0344a486750bb5885047e1a5a633bb8ad17100e85358e976cb9f8"}
Dec 13 06:52:16 crc kubenswrapper[5048]: I1213 06:52:16.443755 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=3.443737976 podStartE2EDuration="3.443737976s" podCreationTimestamp="2025-12-13 06:52:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:52:16.443375907 +0000 UTC m=+1370.309970518" watchObservedRunningTime="2025-12-13 06:52:16.443737976 +0000 UTC m=+1370.310332567"
Dec 13 06:52:16 crc kubenswrapper[5048]: I1213 06:52:16.484256 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.484234146 podStartE2EDuration="3.484234146s" podCreationTimestamp="2025-12-13 06:52:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:52:16.473191919 +0000 UTC m=+1370.339786530" watchObservedRunningTime="2025-12-13 06:52:16.484234146 +0000 UTC m=+1370.350828717"
Dec 13 06:52:16 crc kubenswrapper[5048]: I1213 06:52:16.908296 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Dec 13 06:52:16 crc kubenswrapper[5048]: I1213 06:52:16.909467 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Dec 13 06:52:16 crc kubenswrapper[5048]: I1213 06:52:16.909689 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Dec 13 06:52:16 crc kubenswrapper[5048]: I1213 06:52:16.912385 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Dec 13 06:52:17 crc kubenswrapper[5048]: I1213 06:52:17.450752 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Dec 13 06:52:17 crc kubenswrapper[5048]: I1213 06:52:17.454630 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Dec 13 06:52:17 crc kubenswrapper[5048]: I1213 06:52:17.623148 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-w9nkl"]
Dec 13 06:52:17 crc kubenswrapper[5048]: I1213 06:52:17.624789 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-w9nkl"
Dec 13 06:52:17 crc kubenswrapper[5048]: I1213 06:52:17.703905 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-w9nkl"]
Dec 13 06:52:17 crc kubenswrapper[5048]: I1213 06:52:17.799965 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k9wp8\" (UniqueName: \"kubernetes.io/projected/92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048-kube-api-access-k9wp8\") pod \"dnsmasq-dns-cd5cbd7b9-w9nkl\" (UID: \"92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-w9nkl"
Dec 13 06:52:17 crc kubenswrapper[5048]: I1213 06:52:17.800389 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048-config\") pod \"dnsmasq-dns-cd5cbd7b9-w9nkl\" (UID: \"92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-w9nkl"
Dec 13 06:52:17 crc kubenswrapper[5048]: I1213 06:52:17.800488 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048-dns-swift-storage-0\") pod \"dnsmasq-dns-cd5cbd7b9-w9nkl\" (UID: \"92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-w9nkl"
Dec 13 06:52:17 crc kubenswrapper[5048]: I1213 06:52:17.800537 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048-ovsdbserver-nb\") pod \"dnsmasq-dns-cd5cbd7b9-w9nkl\" (UID: \"92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-w9nkl"
Dec 13 06:52:17 crc kubenswrapper[5048]: I1213 06:52:17.800647 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048-dns-svc\") pod \"dnsmasq-dns-cd5cbd7b9-w9nkl\" (UID: \"92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-w9nkl"
Dec 13 06:52:17 crc kubenswrapper[5048]: I1213 06:52:17.800692 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048-ovsdbserver-sb\") pod \"dnsmasq-dns-cd5cbd7b9-w9nkl\" (UID: \"92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-w9nkl"
Dec 13 06:52:17 crc kubenswrapper[5048]: I1213 06:52:17.902534 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048-config\") pod \"dnsmasq-dns-cd5cbd7b9-w9nkl\" (UID: \"92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-w9nkl"
Dec 13 06:52:17 crc kubenswrapper[5048]: I1213 06:52:17.902617 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048-dns-swift-storage-0\") pod \"dnsmasq-dns-cd5cbd7b9-w9nkl\" (UID: \"92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-w9nkl"
Dec 13 06:52:17 crc kubenswrapper[5048]: I1213 06:52:17.902667 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048-ovsdbserver-nb\") pod \"dnsmasq-dns-cd5cbd7b9-w9nkl\" (UID: \"92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-w9nkl"
Dec 13 06:52:17 crc kubenswrapper[5048]: I1213 06:52:17.902727 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048-dns-svc\") pod \"dnsmasq-dns-cd5cbd7b9-w9nkl\" (UID: \"92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-w9nkl"
Dec 13 06:52:17 crc kubenswrapper[5048]: I1213 06:52:17.902770 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048-ovsdbserver-sb\") pod \"dnsmasq-dns-cd5cbd7b9-w9nkl\" (UID: \"92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-w9nkl"
Dec 13 06:52:17 crc kubenswrapper[5048]: I1213 06:52:17.902795 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k9wp8\" (UniqueName: \"kubernetes.io/projected/92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048-kube-api-access-k9wp8\") pod \"dnsmasq-dns-cd5cbd7b9-w9nkl\" (UID: \"92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-w9nkl"
Dec 13 06:52:17 crc kubenswrapper[5048]: I1213 06:52:17.903521 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048-config\") pod \"dnsmasq-dns-cd5cbd7b9-w9nkl\" (UID: \"92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-w9nkl"
Dec 13 06:52:17 crc kubenswrapper[5048]: I1213 06:52:17.903520 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048-dns-swift-storage-0\") pod \"dnsmasq-dns-cd5cbd7b9-w9nkl\" (UID: \"92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-w9nkl"
Dec 13 06:52:17 crc kubenswrapper[5048]: I1213 06:52:17.904008 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048-dns-svc\") pod \"dnsmasq-dns-cd5cbd7b9-w9nkl\" (UID: \"92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-w9nkl"
Dec 13 06:52:17 crc kubenswrapper[5048]: I1213 06:52:17.904099 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048-ovsdbserver-nb\") pod \"dnsmasq-dns-cd5cbd7b9-w9nkl\" (UID: \"92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-w9nkl"
Dec 13 06:52:17 crc kubenswrapper[5048]: I1213 06:52:17.904482 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048-ovsdbserver-sb\") pod \"dnsmasq-dns-cd5cbd7b9-w9nkl\" (UID: \"92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-w9nkl"
Dec 13 06:52:17 crc kubenswrapper[5048]: I1213 06:52:17.926223 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k9wp8\" (UniqueName: \"kubernetes.io/projected/92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048-kube-api-access-k9wp8\") pod \"dnsmasq-dns-cd5cbd7b9-w9nkl\" (UID: \"92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-w9nkl"
Dec 13 06:52:18 crc kubenswrapper[5048]: I1213 06:52:18.029189 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-w9nkl"
Dec 13 06:52:18 crc kubenswrapper[5048]: W1213 06:52:18.528652 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod92e0ee4a_6f6b_4e3c_85c4_2cd3c8555048.slice/crio-db2770aea3907b64757d2c370217c5dd67271f32fcb76a0aaae13a5c0f2b0010 WatchSource:0}: Error finding container db2770aea3907b64757d2c370217c5dd67271f32fcb76a0aaae13a5c0f2b0010: Status 404 returned error can't find the container with id db2770aea3907b64757d2c370217c5dd67271f32fcb76a0aaae13a5c0f2b0010
Dec 13 06:52:18 crc kubenswrapper[5048]: I1213 06:52:18.531823 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-w9nkl"]
Dec 13 06:52:19 crc kubenswrapper[5048]: I1213 06:52:19.089897 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0"
Dec 13 06:52:19 crc kubenswrapper[5048]: I1213 06:52:19.114174 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Dec 13 06:52:19 crc kubenswrapper[5048]: I1213 06:52:19.114232 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Dec 13 06:52:19 crc kubenswrapper[5048]: I1213 06:52:19.472926 5048 generic.go:334] "Generic (PLEG): container finished" podID="92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048" containerID="4f94737c057a78f6e311402badcbcbbe036078fa18b70b3cf76d218f96f7f0df" exitCode=0
Dec 13 06:52:19 crc kubenswrapper[5048]: I1213 06:52:19.473013 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-w9nkl" event={"ID":"92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048","Type":"ContainerDied","Data":"4f94737c057a78f6e311402badcbcbbe036078fa18b70b3cf76d218f96f7f0df"}
Dec 13 06:52:19 crc kubenswrapper[5048]: I1213 06:52:19.473468 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-w9nkl" event={"ID":"92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048","Type":"ContainerStarted","Data":"db2770aea3907b64757d2c370217c5dd67271f32fcb76a0aaae13a5c0f2b0010"}
Dec 13 06:52:19 crc kubenswrapper[5048]: I1213 06:52:19.562462 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Dec 13 06:52:19 crc kubenswrapper[5048]: I1213 06:52:19.563985 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5ee8f067-026a-4612-aba4-3cc8976dc02b" containerName="ceilometer-central-agent" containerID="cri-o://5678e9a07a9764ffeacf2499dd1b126005d0b9d0ddd2834e7c85b084432753b1" gracePeriod=30
Dec 13 06:52:19 crc kubenswrapper[5048]: I1213 06:52:19.564081 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5ee8f067-026a-4612-aba4-3cc8976dc02b" containerName="proxy-httpd" containerID="cri-o://c849f85d50b7c72d7bf8b7b7f0aa851fb2f4be8ba98763ee2b645aff7d4ba0eb" gracePeriod=30
Dec 13 06:52:19 crc kubenswrapper[5048]: I1213 06:52:19.564117 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5ee8f067-026a-4612-aba4-3cc8976dc02b" containerName="ceilometer-notification-agent" containerID="cri-o://12fa0e4a983ca089a6bde9c9d2c0e2bb854e8e7fcd9ae0c316f434b4680584ae" gracePeriod=30
Dec 13 06:52:19 crc kubenswrapper[5048]: I1213 06:52:19.564110 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5ee8f067-026a-4612-aba4-3cc8976dc02b" containerName="sg-core" containerID="cri-o://7e2a50c3d819afd8d20d882e8c105bf2b9adb54c96fd8aebe0821b3d130c3c1f" gracePeriod=30
Dec 13 06:52:19 crc kubenswrapper[5048]: I1213 06:52:19.571869 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="5ee8f067-026a-4612-aba4-3cc8976dc02b" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.192:3000/\": read tcp 10.217.0.2:40462->10.217.0.192:3000: read: connection reset by peer"
Dec 13 06:52:20 crc kubenswrapper[5048]: I1213 06:52:20.221400 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Dec 13 06:52:20 crc kubenswrapper[5048]: I1213 06:52:20.484778 5048 generic.go:334] "Generic (PLEG): container finished" podID="5ee8f067-026a-4612-aba4-3cc8976dc02b" containerID="c849f85d50b7c72d7bf8b7b7f0aa851fb2f4be8ba98763ee2b645aff7d4ba0eb" exitCode=0
Dec 13 06:52:20 crc kubenswrapper[5048]: I1213 06:52:20.484818 5048 generic.go:334] "Generic (PLEG): container finished" podID="5ee8f067-026a-4612-aba4-3cc8976dc02b" containerID="7e2a50c3d819afd8d20d882e8c105bf2b9adb54c96fd8aebe0821b3d130c3c1f" exitCode=2
Dec 13 06:52:20 crc kubenswrapper[5048]: I1213 06:52:20.484829 5048 generic.go:334] "Generic (PLEG): container finished" podID="5ee8f067-026a-4612-aba4-3cc8976dc02b" containerID="5678e9a07a9764ffeacf2499dd1b126005d0b9d0ddd2834e7c85b084432753b1" exitCode=0
Dec 13 06:52:20 crc kubenswrapper[5048]: I1213 06:52:20.484846 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5ee8f067-026a-4612-aba4-3cc8976dc02b","Type":"ContainerDied","Data":"c849f85d50b7c72d7bf8b7b7f0aa851fb2f4be8ba98763ee2b645aff7d4ba0eb"}
Dec 13 06:52:20 crc kubenswrapper[5048]: I1213 06:52:20.484896 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5ee8f067-026a-4612-aba4-3cc8976dc02b","Type":"ContainerDied","Data":"7e2a50c3d819afd8d20d882e8c105bf2b9adb54c96fd8aebe0821b3d130c3c1f"}
Dec 13 06:52:20 crc kubenswrapper[5048]: I1213 06:52:20.484911 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5ee8f067-026a-4612-aba4-3cc8976dc02b","Type":"ContainerDied","Data":"5678e9a07a9764ffeacf2499dd1b126005d0b9d0ddd2834e7c85b084432753b1"}
Dec 13 06:52:20 crc kubenswrapper[5048]: I1213 06:52:20.487001 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-w9nkl" event={"ID":"92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048","Type":"ContainerStarted","Data":"cbec161e4e03aefd8a7b0d98502ed6591fea05ca350976b48fb88a6aaef172f4"}
Dec 13 06:52:20 crc kubenswrapper[5048]: I1213 06:52:20.487156 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="2071eed4-1f89-426e-ba3e-d79fad42a378" containerName="nova-api-log" containerID="cri-o://e3b74ac41a0bfaa5f38e0869bc122ac88e1eb988e75f2c5ad698e73478e3474f" gracePeriod=30
Dec 13 06:52:20 crc kubenswrapper[5048]: I1213 06:52:20.487209 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="2071eed4-1f89-426e-ba3e-d79fad42a378" containerName="nova-api-api" containerID="cri-o://efc2a5fd4eb3b12fcbbdd454e88197cf42c3c37c66d0933b8004c2d090cf981a" gracePeriod=30
Dec 13 06:52:21 crc kubenswrapper[5048]: I1213 06:52:21.499101 5048 generic.go:334] "Generic (PLEG): container finished" podID="2071eed4-1f89-426e-ba3e-d79fad42a378" containerID="e3b74ac41a0bfaa5f38e0869bc122ac88e1eb988e75f2c5ad698e73478e3474f" exitCode=143
Dec 13 06:52:21 crc kubenswrapper[5048]: I1213 06:52:21.499479 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2071eed4-1f89-426e-ba3e-d79fad42a378","Type":"ContainerDied","Data":"e3b74ac41a0bfaa5f38e0869bc122ac88e1eb988e75f2c5ad698e73478e3474f"}
Dec 13 06:52:21 crc kubenswrapper[5048]: I1213 06:52:21.501524 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-cd5cbd7b9-w9nkl"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.090060 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.114397 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.114980 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.115040 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.137009 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-cd5cbd7b9-w9nkl" podStartSLOduration=7.136988435 podStartE2EDuration="7.136988435s" podCreationTimestamp="2025-12-13 06:52:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:52:20.508425608 +0000 UTC m=+1374.375020209" watchObservedRunningTime="2025-12-13 06:52:24.136988435 +0000 UTC m=+1378.003583016"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.188720 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.276077 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2071eed4-1f89-426e-ba3e-d79fad42a378-combined-ca-bundle\") pod \"2071eed4-1f89-426e-ba3e-d79fad42a378\" (UID: \"2071eed4-1f89-426e-ba3e-d79fad42a378\") "
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.276211 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2071eed4-1f89-426e-ba3e-d79fad42a378-config-data\") pod \"2071eed4-1f89-426e-ba3e-d79fad42a378\" (UID: \"2071eed4-1f89-426e-ba3e-d79fad42a378\") "
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.276481 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2071eed4-1f89-426e-ba3e-d79fad42a378-logs\") pod \"2071eed4-1f89-426e-ba3e-d79fad42a378\" (UID: \"2071eed4-1f89-426e-ba3e-d79fad42a378\") "
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.276949 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pvkmz\" (UniqueName: \"kubernetes.io/projected/2071eed4-1f89-426e-ba3e-d79fad42a378-kube-api-access-pvkmz\") pod \"2071eed4-1f89-426e-ba3e-d79fad42a378\" (UID: \"2071eed4-1f89-426e-ba3e-d79fad42a378\") "
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.279295 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2071eed4-1f89-426e-ba3e-d79fad42a378-logs" (OuterVolumeSpecName: "logs") pod "2071eed4-1f89-426e-ba3e-d79fad42a378" (UID: "2071eed4-1f89-426e-ba3e-d79fad42a378"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.292749 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2071eed4-1f89-426e-ba3e-d79fad42a378-kube-api-access-pvkmz" (OuterVolumeSpecName: "kube-api-access-pvkmz") pod "2071eed4-1f89-426e-ba3e-d79fad42a378" (UID: "2071eed4-1f89-426e-ba3e-d79fad42a378"). InnerVolumeSpecName "kube-api-access-pvkmz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.339842 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2071eed4-1f89-426e-ba3e-d79fad42a378-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2071eed4-1f89-426e-ba3e-d79fad42a378" (UID: "2071eed4-1f89-426e-ba3e-d79fad42a378"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.378918 5048 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2071eed4-1f89-426e-ba3e-d79fad42a378-logs\") on node \"crc\" DevicePath \"\""
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.378956 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pvkmz\" (UniqueName: \"kubernetes.io/projected/2071eed4-1f89-426e-ba3e-d79fad42a378-kube-api-access-pvkmz\") on node \"crc\" DevicePath \"\""
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.378966 5048 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2071eed4-1f89-426e-ba3e-d79fad42a378-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.415555 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2071eed4-1f89-426e-ba3e-d79fad42a378-config-data" (OuterVolumeSpecName: "config-data") pod "2071eed4-1f89-426e-ba3e-d79fad42a378" (UID: "2071eed4-1f89-426e-ba3e-d79fad42a378"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.467483 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.483667 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2071eed4-1f89-426e-ba3e-d79fad42a378-config-data\") on node \"crc\" DevicePath \"\""
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.534026 5048 generic.go:334] "Generic (PLEG): container finished" podID="2071eed4-1f89-426e-ba3e-d79fad42a378" containerID="efc2a5fd4eb3b12fcbbdd454e88197cf42c3c37c66d0933b8004c2d090cf981a" exitCode=0
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.534093 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.534123 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2071eed4-1f89-426e-ba3e-d79fad42a378","Type":"ContainerDied","Data":"efc2a5fd4eb3b12fcbbdd454e88197cf42c3c37c66d0933b8004c2d090cf981a"}
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.534156 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2071eed4-1f89-426e-ba3e-d79fad42a378","Type":"ContainerDied","Data":"ab55b62f5b0901db2ff2a2247aad4ef70b39e8ddd8d28951e2b5abc7993626f5"}
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.534176 5048 scope.go:117] "RemoveContainer" containerID="efc2a5fd4eb3b12fcbbdd454e88197cf42c3c37c66d0933b8004c2d090cf981a"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.570357 5048 scope.go:117] "RemoveContainer" containerID="e3b74ac41a0bfaa5f38e0869bc122ac88e1eb988e75f2c5ad698e73478e3474f"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.570552 5048 generic.go:334] "Generic (PLEG): container finished" podID="5ee8f067-026a-4612-aba4-3cc8976dc02b" containerID="12fa0e4a983ca089a6bde9c9d2c0e2bb854e8e7fcd9ae0c316f434b4680584ae" exitCode=0
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.571867 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.584567 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5ee8f067-026a-4612-aba4-3cc8976dc02b-log-httpd\") pod \"5ee8f067-026a-4612-aba4-3cc8976dc02b\" (UID: \"5ee8f067-026a-4612-aba4-3cc8976dc02b\") "
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.584655 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ee8f067-026a-4612-aba4-3cc8976dc02b-combined-ca-bundle\") pod \"5ee8f067-026a-4612-aba4-3cc8976dc02b\" (UID: \"5ee8f067-026a-4612-aba4-3cc8976dc02b\") "
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.584706 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5ee8f067-026a-4612-aba4-3cc8976dc02b-run-httpd\") pod \"5ee8f067-026a-4612-aba4-3cc8976dc02b\" (UID: \"5ee8f067-026a-4612-aba4-3cc8976dc02b\") "
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.584813 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nlqc4\" (UniqueName: \"kubernetes.io/projected/5ee8f067-026a-4612-aba4-3cc8976dc02b-kube-api-access-nlqc4\") pod \"5ee8f067-026a-4612-aba4-3cc8976dc02b\" (UID: \"5ee8f067-026a-4612-aba4-3cc8976dc02b\") "
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.584853 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ee8f067-026a-4612-aba4-3cc8976dc02b-scripts\") pod \"5ee8f067-026a-4612-aba4-3cc8976dc02b\" (UID: \"5ee8f067-026a-4612-aba4-3cc8976dc02b\") "
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.584899 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ee8f067-026a-4612-aba4-3cc8976dc02b-config-data\") pod \"5ee8f067-026a-4612-aba4-3cc8976dc02b\" (UID: \"5ee8f067-026a-4612-aba4-3cc8976dc02b\") "
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.584951 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/5ee8f067-026a-4612-aba4-3cc8976dc02b-ceilometer-tls-certs\") pod \"5ee8f067-026a-4612-aba4-3cc8976dc02b\" (UID: \"5ee8f067-026a-4612-aba4-3cc8976dc02b\") "
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.584984 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5ee8f067-026a-4612-aba4-3cc8976dc02b-sg-core-conf-yaml\") pod \"5ee8f067-026a-4612-aba4-3cc8976dc02b\" (UID: \"5ee8f067-026a-4612-aba4-3cc8976dc02b\") "
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.586785 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5ee8f067-026a-4612-aba4-3cc8976dc02b-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "5ee8f067-026a-4612-aba4-3cc8976dc02b" (UID: "5ee8f067-026a-4612-aba4-3cc8976dc02b"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.589578 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5ee8f067-026a-4612-aba4-3cc8976dc02b-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "5ee8f067-026a-4612-aba4-3cc8976dc02b" (UID: "5ee8f067-026a-4612-aba4-3cc8976dc02b"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.589597 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ee8f067-026a-4612-aba4-3cc8976dc02b-kube-api-access-nlqc4" (OuterVolumeSpecName: "kube-api-access-nlqc4") pod "5ee8f067-026a-4612-aba4-3cc8976dc02b" (UID: "5ee8f067-026a-4612-aba4-3cc8976dc02b"). InnerVolumeSpecName "kube-api-access-nlqc4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.595857 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ee8f067-026a-4612-aba4-3cc8976dc02b-scripts" (OuterVolumeSpecName: "scripts") pod "5ee8f067-026a-4612-aba4-3cc8976dc02b" (UID: "5ee8f067-026a-4612-aba4-3cc8976dc02b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.605943 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5ee8f067-026a-4612-aba4-3cc8976dc02b","Type":"ContainerDied","Data":"12fa0e4a983ca089a6bde9c9d2c0e2bb854e8e7fcd9ae0c316f434b4680584ae"}
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.606004 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5ee8f067-026a-4612-aba4-3cc8976dc02b","Type":"ContainerDied","Data":"7b8e8833ee81aa9594b417cd8be4adaa1f5468b4c2744e36731e1467fde7d73b"}
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.606024 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.606045 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.606097 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.615910 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Dec 13 06:52:24 crc kubenswrapper[5048]: E1213 06:52:24.616370 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2071eed4-1f89-426e-ba3e-d79fad42a378" containerName="nova-api-log"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.616393 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="2071eed4-1f89-426e-ba3e-d79fad42a378" containerName="nova-api-log"
Dec 13 06:52:24 crc kubenswrapper[5048]: E1213 06:52:24.616414 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2071eed4-1f89-426e-ba3e-d79fad42a378" containerName="nova-api-api"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.616423 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="2071eed4-1f89-426e-ba3e-d79fad42a378" containerName="nova-api-api"
Dec 13 06:52:24 crc kubenswrapper[5048]: E1213 06:52:24.616479 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ee8f067-026a-4612-aba4-3cc8976dc02b" containerName="ceilometer-notification-agent"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.616489 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ee8f067-026a-4612-aba4-3cc8976dc02b" containerName="ceilometer-notification-agent"
Dec 13 06:52:24 crc kubenswrapper[5048]: E1213 06:52:24.616500 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ee8f067-026a-4612-aba4-3cc8976dc02b" containerName="ceilometer-central-agent"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.616508 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ee8f067-026a-4612-aba4-3cc8976dc02b" containerName="ceilometer-central-agent"
Dec 13 06:52:24 crc kubenswrapper[5048]: E1213 06:52:24.616534 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ee8f067-026a-4612-aba4-3cc8976dc02b" containerName="sg-core"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.616541 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ee8f067-026a-4612-aba4-3cc8976dc02b" containerName="sg-core"
Dec 13 06:52:24 crc kubenswrapper[5048]: E1213 06:52:24.616555 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ee8f067-026a-4612-aba4-3cc8976dc02b" containerName="proxy-httpd"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.616564 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ee8f067-026a-4612-aba4-3cc8976dc02b" containerName="proxy-httpd"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.616778 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ee8f067-026a-4612-aba4-3cc8976dc02b" containerName="ceilometer-notification-agent"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.616793 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ee8f067-026a-4612-aba4-3cc8976dc02b" containerName="proxy-httpd"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.616812 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="2071eed4-1f89-426e-ba3e-d79fad42a378" containerName="nova-api-api"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.616825 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="2071eed4-1f89-426e-ba3e-d79fad42a378" containerName="nova-api-log"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.616845 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ee8f067-026a-4612-aba4-3cc8976dc02b" containerName="ceilometer-central-agent"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.616861 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ee8f067-026a-4612-aba4-3cc8976dc02b" containerName="sg-core"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.620936 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.631882 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.632070 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.632172 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.632321 5048 scope.go:117] "RemoveContainer" containerID="efc2a5fd4eb3b12fcbbdd454e88197cf42c3c37c66d0933b8004c2d090cf981a"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.632429 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Dec 13 06:52:24 crc kubenswrapper[5048]: E1213 06:52:24.634376 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"efc2a5fd4eb3b12fcbbdd454e88197cf42c3c37c66d0933b8004c2d090cf981a\": container with ID starting with efc2a5fd4eb3b12fcbbdd454e88197cf42c3c37c66d0933b8004c2d090cf981a not found: ID does not exist" containerID="efc2a5fd4eb3b12fcbbdd454e88197cf42c3c37c66d0933b8004c2d090cf981a"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.634410 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"efc2a5fd4eb3b12fcbbdd454e88197cf42c3c37c66d0933b8004c2d090cf981a"} err="failed to get container status \"efc2a5fd4eb3b12fcbbdd454e88197cf42c3c37c66d0933b8004c2d090cf981a\": rpc error: code = NotFound desc = could not find container \"efc2a5fd4eb3b12fcbbdd454e88197cf42c3c37c66d0933b8004c2d090cf981a\": container with ID starting with efc2a5fd4eb3b12fcbbdd454e88197cf42c3c37c66d0933b8004c2d090cf981a not found: ID does not exist"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.634457 5048 scope.go:117] "RemoveContainer" containerID="e3b74ac41a0bfaa5f38e0869bc122ac88e1eb988e75f2c5ad698e73478e3474f"
Dec 13 06:52:24 crc kubenswrapper[5048]: E1213 06:52:24.634734 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e3b74ac41a0bfaa5f38e0869bc122ac88e1eb988e75f2c5ad698e73478e3474f\": container with ID starting with e3b74ac41a0bfaa5f38e0869bc122ac88e1eb988e75f2c5ad698e73478e3474f not found: ID does not exist" containerID="e3b74ac41a0bfaa5f38e0869bc122ac88e1eb988e75f2c5ad698e73478e3474f"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.634752 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e3b74ac41a0bfaa5f38e0869bc122ac88e1eb988e75f2c5ad698e73478e3474f"} err="failed to get container status \"e3b74ac41a0bfaa5f38e0869bc122ac88e1eb988e75f2c5ad698e73478e3474f\": rpc error: code = NotFound desc = could not find container \"e3b74ac41a0bfaa5f38e0869bc122ac88e1eb988e75f2c5ad698e73478e3474f\": container with ID starting with e3b74ac41a0bfaa5f38e0869bc122ac88e1eb988e75f2c5ad698e73478e3474f not found: ID does not exist"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.634767 5048 scope.go:117] "RemoveContainer" containerID="c849f85d50b7c72d7bf8b7b7f0aa851fb2f4be8ba98763ee2b645aff7d4ba0eb"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.687683 5048 scope.go:117] "RemoveContainer" containerID="7e2a50c3d819afd8d20d882e8c105bf2b9adb54c96fd8aebe0821b3d130c3c1f"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.687693 5048 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5ee8f067-026a-4612-aba4-3cc8976dc02b-log-httpd\") on node \"crc\" DevicePath \"\""
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.687693 5048 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5ee8f067-026a-4612-aba4-3cc8976dc02b-log-httpd\") on node \"crc\" DevicePath \"\""
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.687888 5048 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5ee8f067-026a-4612-aba4-3cc8976dc02b-run-httpd\") on node \"crc\" DevicePath \"\""
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.687903 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nlqc4\" (UniqueName: \"kubernetes.io/projected/5ee8f067-026a-4612-aba4-3cc8976dc02b-kube-api-access-nlqc4\") on node \"crc\" DevicePath \"\""
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.687915 5048 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ee8f067-026a-4612-aba4-3cc8976dc02b-scripts\") on node \"crc\" DevicePath \"\""
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.734636 5048 scope.go:117] "RemoveContainer" containerID="12fa0e4a983ca089a6bde9c9d2c0e2bb854e8e7fcd9ae0c316f434b4680584ae"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.734670 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ee8f067-026a-4612-aba4-3cc8976dc02b-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "5ee8f067-026a-4612-aba4-3cc8976dc02b" (UID: "5ee8f067-026a-4612-aba4-3cc8976dc02b"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.759992 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ee8f067-026a-4612-aba4-3cc8976dc02b-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "5ee8f067-026a-4612-aba4-3cc8976dc02b" (UID: "5ee8f067-026a-4612-aba4-3cc8976dc02b"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.761678 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ee8f067-026a-4612-aba4-3cc8976dc02b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5ee8f067-026a-4612-aba4-3cc8976dc02b" (UID: "5ee8f067-026a-4612-aba4-3cc8976dc02b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.793454 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/49f372c6-68f7-45d2-a838-6336bb065fec-internal-tls-certs\") pod \"nova-api-0\" (UID: \"49f372c6-68f7-45d2-a838-6336bb065fec\") " pod="openstack/nova-api-0"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.793546 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49f372c6-68f7-45d2-a838-6336bb065fec-logs\") pod \"nova-api-0\" (UID: \"49f372c6-68f7-45d2-a838-6336bb065fec\") " pod="openstack/nova-api-0"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.793663 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lzkww\" (UniqueName: \"kubernetes.io/projected/49f372c6-68f7-45d2-a838-6336bb065fec-kube-api-access-lzkww\") pod \"nova-api-0\" (UID: \"49f372c6-68f7-45d2-a838-6336bb065fec\") " pod="openstack/nova-api-0"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.793754 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49f372c6-68f7-45d2-a838-6336bb065fec-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"49f372c6-68f7-45d2-a838-6336bb065fec\") " pod="openstack/nova-api-0"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.793782 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49f372c6-68f7-45d2-a838-6336bb065fec-config-data\") pod \"nova-api-0\" (UID: \"49f372c6-68f7-45d2-a838-6336bb065fec\") " pod="openstack/nova-api-0"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.793918 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/49f372c6-68f7-45d2-a838-6336bb065fec-public-tls-certs\") pod \"nova-api-0\" (UID: \"49f372c6-68f7-45d2-a838-6336bb065fec\") " pod="openstack/nova-api-0"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.794047 5048 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/5ee8f067-026a-4612-aba4-3cc8976dc02b-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\""
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.794065 5048 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5ee8f067-026a-4612-aba4-3cc8976dc02b-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.794078 5048 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ee8f067-026a-4612-aba4-3cc8976dc02b-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.849059 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-r4vpp"]
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.852895 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-r4vpp"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.855354 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.855615 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.869975 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-r4vpp"]
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.876707 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ee8f067-026a-4612-aba4-3cc8976dc02b-config-data" (OuterVolumeSpecName: "config-data") pod "5ee8f067-026a-4612-aba4-3cc8976dc02b" (UID: "5ee8f067-026a-4612-aba4-3cc8976dc02b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.899198 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghm5t\" (UniqueName: \"kubernetes.io/projected/1b83ee75-f7b6-4c4e-9439-49b4646efc16-kube-api-access-ghm5t\") pod \"nova-cell1-cell-mapping-r4vpp\" (UID: \"1b83ee75-f7b6-4c4e-9439-49b4646efc16\") " pod="openstack/nova-cell1-cell-mapping-r4vpp"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.899252 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lzkww\" (UniqueName: \"kubernetes.io/projected/49f372c6-68f7-45d2-a838-6336bb065fec-kube-api-access-lzkww\") pod \"nova-api-0\" (UID: \"49f372c6-68f7-45d2-a838-6336bb065fec\") " pod="openstack/nova-api-0"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.899276 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b83ee75-f7b6-4c4e-9439-49b4646efc16-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-r4vpp\" (UID: \"1b83ee75-f7b6-4c4e-9439-49b4646efc16\") " pod="openstack/nova-cell1-cell-mapping-r4vpp"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.899328 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49f372c6-68f7-45d2-a838-6336bb065fec-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"49f372c6-68f7-45d2-a838-6336bb065fec\") " pod="openstack/nova-api-0"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.899344 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49f372c6-68f7-45d2-a838-6336bb065fec-config-data\") pod \"nova-api-0\" (UID: \"49f372c6-68f7-45d2-a838-6336bb065fec\") " pod="openstack/nova-api-0"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.899398 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/49f372c6-68f7-45d2-a838-6336bb065fec-public-tls-certs\") pod \"nova-api-0\" (UID: \"49f372c6-68f7-45d2-a838-6336bb065fec\") " pod="openstack/nova-api-0"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.899419 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1b83ee75-f7b6-4c4e-9439-49b4646efc16-scripts\") pod \"nova-cell1-cell-mapping-r4vpp\" (UID: \"1b83ee75-f7b6-4c4e-9439-49b4646efc16\") " pod="openstack/nova-cell1-cell-mapping-r4vpp"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.899470 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/49f372c6-68f7-45d2-a838-6336bb065fec-internal-tls-certs\") pod \"nova-api-0\" (UID: \"49f372c6-68f7-45d2-a838-6336bb065fec\") " pod="openstack/nova-api-0"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.899503 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b83ee75-f7b6-4c4e-9439-49b4646efc16-config-data\") pod \"nova-cell1-cell-mapping-r4vpp\" (UID: \"1b83ee75-f7b6-4c4e-9439-49b4646efc16\") " pod="openstack/nova-cell1-cell-mapping-r4vpp"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.899519 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49f372c6-68f7-45d2-a838-6336bb065fec-logs\") pod \"nova-api-0\" (UID: \"49f372c6-68f7-45d2-a838-6336bb065fec\") " pod="openstack/nova-api-0"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.899574 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ee8f067-026a-4612-aba4-3cc8976dc02b-config-data\") on node \"crc\" DevicePath \"\""
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.900014 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49f372c6-68f7-45d2-a838-6336bb065fec-logs\") pod \"nova-api-0\" (UID: \"49f372c6-68f7-45d2-a838-6336bb065fec\") " pod="openstack/nova-api-0"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.905355 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49f372c6-68f7-45d2-a838-6336bb065fec-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"49f372c6-68f7-45d2-a838-6336bb065fec\") " pod="openstack/nova-api-0"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.906744 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49f372c6-68f7-45d2-a838-6336bb065fec-config-data\") pod \"nova-api-0\" (UID: \"49f372c6-68f7-45d2-a838-6336bb065fec\") " pod="openstack/nova-api-0"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.907241 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/49f372c6-68f7-45d2-a838-6336bb065fec-public-tls-certs\") pod \"nova-api-0\" (UID: \"49f372c6-68f7-45d2-a838-6336bb065fec\") " pod="openstack/nova-api-0"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.910041 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/49f372c6-68f7-45d2-a838-6336bb065fec-internal-tls-certs\") pod \"nova-api-0\" (UID: \"49f372c6-68f7-45d2-a838-6336bb065fec\") " pod="openstack/nova-api-0"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.925110 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lzkww\" (UniqueName: \"kubernetes.io/projected/49f372c6-68f7-45d2-a838-6336bb065fec-kube-api-access-lzkww\") pod \"nova-api-0\" (UID: \"49f372c6-68f7-45d2-a838-6336bb065fec\") " pod="openstack/nova-api-0"
Dec 13 06:52:24 crc kubenswrapper[5048]: I1213 06:52:24.971814 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.003452 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1b83ee75-f7b6-4c4e-9439-49b4646efc16-scripts\") pod \"nova-cell1-cell-mapping-r4vpp\" (UID: \"1b83ee75-f7b6-4c4e-9439-49b4646efc16\") " pod="openstack/nova-cell1-cell-mapping-r4vpp"
Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.003539 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b83ee75-f7b6-4c4e-9439-49b4646efc16-config-data\") pod \"nova-cell1-cell-mapping-r4vpp\" (UID: \"1b83ee75-f7b6-4c4e-9439-49b4646efc16\") " pod="openstack/nova-cell1-cell-mapping-r4vpp"
Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.003593 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ghm5t\" (UniqueName: \"kubernetes.io/projected/1b83ee75-f7b6-4c4e-9439-49b4646efc16-kube-api-access-ghm5t\") pod \"nova-cell1-cell-mapping-r4vpp\" (UID: \"1b83ee75-f7b6-4c4e-9439-49b4646efc16\") " pod="openstack/nova-cell1-cell-mapping-r4vpp"
Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.003615 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b83ee75-f7b6-4c4e-9439-49b4646efc16-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-r4vpp\" (UID: \"1b83ee75-f7b6-4c4e-9439-49b4646efc16\") " pod="openstack/nova-cell1-cell-mapping-r4vpp"
Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.012103 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b83ee75-f7b6-4c4e-9439-49b4646efc16-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-r4vpp\" (UID: \"1b83ee75-f7b6-4c4e-9439-49b4646efc16\") " pod="openstack/nova-cell1-cell-mapping-r4vpp"
Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.023019 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b83ee75-f7b6-4c4e-9439-49b4646efc16-config-data\") pod \"nova-cell1-cell-mapping-r4vpp\" (UID: \"1b83ee75-f7b6-4c4e-9439-49b4646efc16\") " pod="openstack/nova-cell1-cell-mapping-r4vpp"
Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.023827 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1b83ee75-f7b6-4c4e-9439-49b4646efc16-scripts\") pod \"nova-cell1-cell-mapping-r4vpp\" (UID: \"1b83ee75-f7b6-4c4e-9439-49b4646efc16\") " pod="openstack/nova-cell1-cell-mapping-r4vpp"
Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.026404 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ghm5t\" (UniqueName: \"kubernetes.io/projected/1b83ee75-f7b6-4c4e-9439-49b4646efc16-kube-api-access-ghm5t\") pod \"nova-cell1-cell-mapping-r4vpp\" (UID: \"1b83ee75-f7b6-4c4e-9439-49b4646efc16\") " pod="openstack/nova-cell1-cell-mapping-r4vpp"
Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.136525 5048 scope.go:117] "RemoveContainer" containerID="5678e9a07a9764ffeacf2499dd1b126005d0b9d0ddd2834e7c85b084432753b1"
Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.153566 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="074ecf6d-bc26-46eb-8cf0-ffcf4612a773" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.195:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.153933 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="074ecf6d-bc26-46eb-8cf0-ffcf4612a773" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.195:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.155356 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-r4vpp"
Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.228137 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.256782 5048 scope.go:117] "RemoveContainer" containerID="c849f85d50b7c72d7bf8b7b7f0aa851fb2f4be8ba98763ee2b645aff7d4ba0eb"
Dec 13 06:52:25 crc kubenswrapper[5048]: E1213 06:52:25.258108 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c849f85d50b7c72d7bf8b7b7f0aa851fb2f4be8ba98763ee2b645aff7d4ba0eb\": container with ID starting with c849f85d50b7c72d7bf8b7b7f0aa851fb2f4be8ba98763ee2b645aff7d4ba0eb not found: ID does not exist" containerID="c849f85d50b7c72d7bf8b7b7f0aa851fb2f4be8ba98763ee2b645aff7d4ba0eb"
Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.258188 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c849f85d50b7c72d7bf8b7b7f0aa851fb2f4be8ba98763ee2b645aff7d4ba0eb"} err="failed to get container status \"c849f85d50b7c72d7bf8b7b7f0aa851fb2f4be8ba98763ee2b645aff7d4ba0eb\": rpc error: code = NotFound desc = could not find container \"c849f85d50b7c72d7bf8b7b7f0aa851fb2f4be8ba98763ee2b645aff7d4ba0eb\": container with ID starting with c849f85d50b7c72d7bf8b7b7f0aa851fb2f4be8ba98763ee2b645aff7d4ba0eb not found: ID does not exist"
Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.258228 5048 scope.go:117] "RemoveContainer" containerID="7e2a50c3d819afd8d20d882e8c105bf2b9adb54c96fd8aebe0821b3d130c3c1f"
Dec 13 06:52:25 crc kubenswrapper[5048]: E1213 06:52:25.258671 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7e2a50c3d819afd8d20d882e8c105bf2b9adb54c96fd8aebe0821b3d130c3c1f\": container with ID starting with 7e2a50c3d819afd8d20d882e8c105bf2b9adb54c96fd8aebe0821b3d130c3c1f not found: ID does not exist" containerID="7e2a50c3d819afd8d20d882e8c105bf2b9adb54c96fd8aebe0821b3d130c3c1f"
Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.258708 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e2a50c3d819afd8d20d882e8c105bf2b9adb54c96fd8aebe0821b3d130c3c1f"} err="failed to get container status \"7e2a50c3d819afd8d20d882e8c105bf2b9adb54c96fd8aebe0821b3d130c3c1f\": rpc error: code = NotFound desc = could not find container \"7e2a50c3d819afd8d20d882e8c105bf2b9adb54c96fd8aebe0821b3d130c3c1f\": container with ID starting with 7e2a50c3d819afd8d20d882e8c105bf2b9adb54c96fd8aebe0821b3d130c3c1f not found: ID does not exist"
containerID="12fa0e4a983ca089a6bde9c9d2c0e2bb854e8e7fcd9ae0c316f434b4680584ae" Dec 13 06:52:25 crc kubenswrapper[5048]: E1213 06:52:25.258998 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"12fa0e4a983ca089a6bde9c9d2c0e2bb854e8e7fcd9ae0c316f434b4680584ae\": container with ID starting with 12fa0e4a983ca089a6bde9c9d2c0e2bb854e8e7fcd9ae0c316f434b4680584ae not found: ID does not exist" containerID="12fa0e4a983ca089a6bde9c9d2c0e2bb854e8e7fcd9ae0c316f434b4680584ae" Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.259058 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"12fa0e4a983ca089a6bde9c9d2c0e2bb854e8e7fcd9ae0c316f434b4680584ae"} err="failed to get container status \"12fa0e4a983ca089a6bde9c9d2c0e2bb854e8e7fcd9ae0c316f434b4680584ae\": rpc error: code = NotFound desc = could not find container \"12fa0e4a983ca089a6bde9c9d2c0e2bb854e8e7fcd9ae0c316f434b4680584ae\": container with ID starting with 12fa0e4a983ca089a6bde9c9d2c0e2bb854e8e7fcd9ae0c316f434b4680584ae not found: ID does not exist" Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.259080 5048 scope.go:117] "RemoveContainer" containerID="5678e9a07a9764ffeacf2499dd1b126005d0b9d0ddd2834e7c85b084432753b1" Dec 13 06:52:25 crc kubenswrapper[5048]: E1213 06:52:25.259524 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5678e9a07a9764ffeacf2499dd1b126005d0b9d0ddd2834e7c85b084432753b1\": container with ID starting with 5678e9a07a9764ffeacf2499dd1b126005d0b9d0ddd2834e7c85b084432753b1 not found: ID does not exist" containerID="5678e9a07a9764ffeacf2499dd1b126005d0b9d0ddd2834e7c85b084432753b1" Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.259563 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5678e9a07a9764ffeacf2499dd1b126005d0b9d0ddd2834e7c85b084432753b1"} err="failed to get container status \"5678e9a07a9764ffeacf2499dd1b126005d0b9d0ddd2834e7c85b084432753b1\": rpc error: code = NotFound desc = could not find container \"5678e9a07a9764ffeacf2499dd1b126005d0b9d0ddd2834e7c85b084432753b1\": container with ID starting with 5678e9a07a9764ffeacf2499dd1b126005d0b9d0ddd2834e7c85b084432753b1 not found: ID does not exist" Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.259618 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.272742 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.285589 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.285881 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.289385 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.289618 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.289625 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.421608 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6d85513-7b75-40e7-9eae-08544cccbc55-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a6d85513-7b75-40e7-9eae-08544cccbc55\") " pod="openstack/ceilometer-0" Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.421659 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/a6d85513-7b75-40e7-9eae-08544cccbc55-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"a6d85513-7b75-40e7-9eae-08544cccbc55\") " pod="openstack/ceilometer-0" Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.421701 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a6d85513-7b75-40e7-9eae-08544cccbc55-run-httpd\") pod \"ceilometer-0\" (UID: \"a6d85513-7b75-40e7-9eae-08544cccbc55\") " pod="openstack/ceilometer-0" Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.421766 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a6d85513-7b75-40e7-9eae-08544cccbc55-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a6d85513-7b75-40e7-9eae-08544cccbc55\") " pod="openstack/ceilometer-0" Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.421841 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a6d85513-7b75-40e7-9eae-08544cccbc55-scripts\") pod \"ceilometer-0\" (UID: \"a6d85513-7b75-40e7-9eae-08544cccbc55\") " pod="openstack/ceilometer-0" Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.421861 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6d85513-7b75-40e7-9eae-08544cccbc55-config-data\") pod \"ceilometer-0\" (UID: \"a6d85513-7b75-40e7-9eae-08544cccbc55\") " pod="openstack/ceilometer-0" Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.421877 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2qf2l\" (UniqueName: \"kubernetes.io/projected/a6d85513-7b75-40e7-9eae-08544cccbc55-kube-api-access-2qf2l\") pod \"ceilometer-0\" (UID: \"a6d85513-7b75-40e7-9eae-08544cccbc55\") " pod="openstack/ceilometer-0" Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.421933 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a6d85513-7b75-40e7-9eae-08544cccbc55-log-httpd\") pod \"ceilometer-0\" (UID: \"a6d85513-7b75-40e7-9eae-08544cccbc55\") " pod="openstack/ceilometer-0" Dec 
13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.493653 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.523885 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a6d85513-7b75-40e7-9eae-08544cccbc55-log-httpd\") pod \"ceilometer-0\" (UID: \"a6d85513-7b75-40e7-9eae-08544cccbc55\") " pod="openstack/ceilometer-0" Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.524094 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6d85513-7b75-40e7-9eae-08544cccbc55-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a6d85513-7b75-40e7-9eae-08544cccbc55\") " pod="openstack/ceilometer-0" Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.524126 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/a6d85513-7b75-40e7-9eae-08544cccbc55-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"a6d85513-7b75-40e7-9eae-08544cccbc55\") " pod="openstack/ceilometer-0" Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.524184 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a6d85513-7b75-40e7-9eae-08544cccbc55-run-httpd\") pod \"ceilometer-0\" (UID: \"a6d85513-7b75-40e7-9eae-08544cccbc55\") " pod="openstack/ceilometer-0" Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.524264 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a6d85513-7b75-40e7-9eae-08544cccbc55-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a6d85513-7b75-40e7-9eae-08544cccbc55\") " pod="openstack/ceilometer-0" Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.524395 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a6d85513-7b75-40e7-9eae-08544cccbc55-scripts\") pod \"ceilometer-0\" (UID: \"a6d85513-7b75-40e7-9eae-08544cccbc55\") " pod="openstack/ceilometer-0" Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.524426 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2qf2l\" (UniqueName: \"kubernetes.io/projected/a6d85513-7b75-40e7-9eae-08544cccbc55-kube-api-access-2qf2l\") pod \"ceilometer-0\" (UID: \"a6d85513-7b75-40e7-9eae-08544cccbc55\") " pod="openstack/ceilometer-0" Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.524462 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6d85513-7b75-40e7-9eae-08544cccbc55-config-data\") pod \"ceilometer-0\" (UID: \"a6d85513-7b75-40e7-9eae-08544cccbc55\") " pod="openstack/ceilometer-0" Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.525556 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a6d85513-7b75-40e7-9eae-08544cccbc55-log-httpd\") pod \"ceilometer-0\" (UID: \"a6d85513-7b75-40e7-9eae-08544cccbc55\") " pod="openstack/ceilometer-0" Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.525868 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/a6d85513-7b75-40e7-9eae-08544cccbc55-run-httpd\") pod \"ceilometer-0\" (UID: \"a6d85513-7b75-40e7-9eae-08544cccbc55\") " pod="openstack/ceilometer-0" Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.530033 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/a6d85513-7b75-40e7-9eae-08544cccbc55-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"a6d85513-7b75-40e7-9eae-08544cccbc55\") " pod="openstack/ceilometer-0" Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.530100 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6d85513-7b75-40e7-9eae-08544cccbc55-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a6d85513-7b75-40e7-9eae-08544cccbc55\") " pod="openstack/ceilometer-0" Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.530731 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6d85513-7b75-40e7-9eae-08544cccbc55-config-data\") pod \"ceilometer-0\" (UID: \"a6d85513-7b75-40e7-9eae-08544cccbc55\") " pod="openstack/ceilometer-0" Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.533209 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a6d85513-7b75-40e7-9eae-08544cccbc55-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a6d85513-7b75-40e7-9eae-08544cccbc55\") " pod="openstack/ceilometer-0" Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.540563 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a6d85513-7b75-40e7-9eae-08544cccbc55-scripts\") pod \"ceilometer-0\" (UID: \"a6d85513-7b75-40e7-9eae-08544cccbc55\") " pod="openstack/ceilometer-0" Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.551175 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2qf2l\" (UniqueName: \"kubernetes.io/projected/a6d85513-7b75-40e7-9eae-08544cccbc55-kube-api-access-2qf2l\") pod \"ceilometer-0\" (UID: \"a6d85513-7b75-40e7-9eae-08544cccbc55\") " pod="openstack/ceilometer-0" Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.590057 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"49f372c6-68f7-45d2-a838-6336bb065fec","Type":"ContainerStarted","Data":"f3809015b7aefcbd35c4a7ce0fc826d9bd6f31ad8312d76cf28379e2f164b4d8"} Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.618680 5048 util.go:30] "No sandbox for pod can be found. 
Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.618680 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 13 06:52:25 crc kubenswrapper[5048]: I1213 06:52:25.732747 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-r4vpp"]
Dec 13 06:52:26 crc kubenswrapper[5048]: I1213 06:52:26.130424 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Dec 13 06:52:26 crc kubenswrapper[5048]: W1213 06:52:26.143566 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda6d85513_7b75_40e7_9eae_08544cccbc55.slice/crio-2f36cfb30d83781917e16a66f22cbf6294ad9e6c84232cccd858a8f0bd219862 WatchSource:0}: Error finding container 2f36cfb30d83781917e16a66f22cbf6294ad9e6c84232cccd858a8f0bd219862: Status 404 returned error can't find the container with id 2f36cfb30d83781917e16a66f22cbf6294ad9e6c84232cccd858a8f0bd219862
Dec 13 06:52:26 crc kubenswrapper[5048]: I1213 06:52:26.586745 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2071eed4-1f89-426e-ba3e-d79fad42a378" path="/var/lib/kubelet/pods/2071eed4-1f89-426e-ba3e-d79fad42a378/volumes"
Dec 13 06:52:26 crc kubenswrapper[5048]: I1213 06:52:26.587594 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ee8f067-026a-4612-aba4-3cc8976dc02b" path="/var/lib/kubelet/pods/5ee8f067-026a-4612-aba4-3cc8976dc02b/volumes"
Dec 13 06:52:26 crc kubenswrapper[5048]: I1213 06:52:26.640171 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"49f372c6-68f7-45d2-a838-6336bb065fec","Type":"ContainerStarted","Data":"816af06a0232627aa1f52fcb2dff1e1d1070cd69a89704972680e5d158b2bb30"}
Dec 13 06:52:26 crc kubenswrapper[5048]: I1213 06:52:26.640215 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"49f372c6-68f7-45d2-a838-6336bb065fec","Type":"ContainerStarted","Data":"3a1104e49fd9c5c4be36287993036865ad21cdbff9828a32eb5b9c3634a1c995"}
Dec 13 06:52:26 crc kubenswrapper[5048]: I1213 06:52:26.644675 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a6d85513-7b75-40e7-9eae-08544cccbc55","Type":"ContainerStarted","Data":"2f36cfb30d83781917e16a66f22cbf6294ad9e6c84232cccd858a8f0bd219862"}
Dec 13 06:52:26 crc kubenswrapper[5048]: I1213 06:52:26.650164 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-r4vpp" event={"ID":"1b83ee75-f7b6-4c4e-9439-49b4646efc16","Type":"ContainerStarted","Data":"612298789e3bc2581412496aaca87fcd6bd3d941aa5ac33cc9659c35ad3ee290"}
Dec 13 06:52:26 crc kubenswrapper[5048]: I1213 06:52:26.650251 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-r4vpp" event={"ID":"1b83ee75-f7b6-4c4e-9439-49b4646efc16","Type":"ContainerStarted","Data":"2bda63b61f0bf514cd5b9ff45bd4ef0f5cccbc9b43adb96e92cb8d47718fc5c4"}
Dec 13 06:52:26 crc kubenswrapper[5048]: I1213 06:52:26.680214 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.680199107 podStartE2EDuration="2.680199107s" podCreationTimestamp="2025-12-13 06:52:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:52:26.679837047 +0000 UTC m=+1380.546431638" watchObservedRunningTime="2025-12-13 06:52:26.680199107 +0000 UTC m=+1380.546793678"
Dec 13 06:52:26 crc kubenswrapper[5048]: I1213 06:52:26.705611 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-r4vpp" podStartSLOduration=2.70559231 podStartE2EDuration="2.70559231s" podCreationTimestamp="2025-12-13 06:52:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:52:26.694320896 +0000 UTC m=+1380.560915487" watchObservedRunningTime="2025-12-13 06:52:26.70559231 +0000 UTC m=+1380.572186891"
Dec 13 06:52:27 crc kubenswrapper[5048]: I1213 06:52:27.665964 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a6d85513-7b75-40e7-9eae-08544cccbc55","Type":"ContainerStarted","Data":"14de4c7575a76f78a84b6383f48dd04b60763d9a2735328504cedc72bd79ccba"}
Dec 13 06:52:28 crc kubenswrapper[5048]: I1213 06:52:28.030582 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-cd5cbd7b9-w9nkl"
Dec 13 06:52:28 crc kubenswrapper[5048]: I1213 06:52:28.104913 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-mqjdq"]
Dec 13 06:52:28 crc kubenswrapper[5048]: I1213 06:52:28.105215 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-bccf8f775-mqjdq" podUID="4a966382-7a0b-439d-a1f0-9a08ae863aa0" containerName="dnsmasq-dns" containerID="cri-o://b271d4fc477426e860c372ceadf63bedb721b7d48fae352ba3bf1095822d04c6" gracePeriod=10
Dec 13 06:52:28 crc kubenswrapper[5048]: I1213 06:52:28.662792 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-mqjdq"
Dec 13 06:52:28 crc kubenswrapper[5048]: I1213 06:52:28.675688 5048 generic.go:334] "Generic (PLEG): container finished" podID="4a966382-7a0b-439d-a1f0-9a08ae863aa0" containerID="b271d4fc477426e860c372ceadf63bedb721b7d48fae352ba3bf1095822d04c6" exitCode=0
Dec 13 06:52:28 crc kubenswrapper[5048]: I1213 06:52:28.675747 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-mqjdq" event={"ID":"4a966382-7a0b-439d-a1f0-9a08ae863aa0","Type":"ContainerDied","Data":"b271d4fc477426e860c372ceadf63bedb721b7d48fae352ba3bf1095822d04c6"}
Dec 13 06:52:28 crc kubenswrapper[5048]: I1213 06:52:28.675769 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-mqjdq"
Dec 13 06:52:28 crc kubenswrapper[5048]: I1213 06:52:28.675809 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-mqjdq" event={"ID":"4a966382-7a0b-439d-a1f0-9a08ae863aa0","Type":"ContainerDied","Data":"34210ed7cf36c3dced2c4e965ae55b2274d8af6214309c79dd0979980713a35c"}
Dec 13 06:52:28 crc kubenswrapper[5048]: I1213 06:52:28.675828 5048 scope.go:117] "RemoveContainer" containerID="b271d4fc477426e860c372ceadf63bedb721b7d48fae352ba3bf1095822d04c6"
Dec 13 06:52:28 crc kubenswrapper[5048]: I1213 06:52:28.682539 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a6d85513-7b75-40e7-9eae-08544cccbc55","Type":"ContainerStarted","Data":"ca92963d57fc1bed42a441168101a85d6991176a973764502aee1e2276d3a02e"}
Dec 13 06:52:28 crc kubenswrapper[5048]: I1213 06:52:28.682618 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a6d85513-7b75-40e7-9eae-08544cccbc55","Type":"ContainerStarted","Data":"4cba851a1562bf329bbf2afca20dc94e2a572cf85f7c39c9fb1534cde9e1fc20"}
Dec 13 06:52:28 crc kubenswrapper[5048]: I1213 06:52:28.700705 5048 scope.go:117] "RemoveContainer" containerID="bce8de951878a4eccea558410f2fadc4eaa5eb9ac83684e6de4f177084142724"
Dec 13 06:52:28 crc kubenswrapper[5048]: I1213 06:52:28.722298 5048 scope.go:117] "RemoveContainer" containerID="b271d4fc477426e860c372ceadf63bedb721b7d48fae352ba3bf1095822d04c6"
Dec 13 06:52:28 crc kubenswrapper[5048]: E1213 06:52:28.722916 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b271d4fc477426e860c372ceadf63bedb721b7d48fae352ba3bf1095822d04c6\": container with ID starting with b271d4fc477426e860c372ceadf63bedb721b7d48fae352ba3bf1095822d04c6 not found: ID does not exist" containerID="b271d4fc477426e860c372ceadf63bedb721b7d48fae352ba3bf1095822d04c6"
Dec 13 06:52:28 crc kubenswrapper[5048]: I1213 06:52:28.722968 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b271d4fc477426e860c372ceadf63bedb721b7d48fae352ba3bf1095822d04c6"} err="failed to get container status \"b271d4fc477426e860c372ceadf63bedb721b7d48fae352ba3bf1095822d04c6\": rpc error: code = NotFound desc = could not find container \"b271d4fc477426e860c372ceadf63bedb721b7d48fae352ba3bf1095822d04c6\": container with ID starting with b271d4fc477426e860c372ceadf63bedb721b7d48fae352ba3bf1095822d04c6 not found: ID does not exist"
Dec 13 06:52:28 crc kubenswrapper[5048]: I1213 06:52:28.723019 5048 scope.go:117] "RemoveContainer" containerID="bce8de951878a4eccea558410f2fadc4eaa5eb9ac83684e6de4f177084142724"
Dec 13 06:52:28 crc kubenswrapper[5048]: E1213 06:52:28.723994 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bce8de951878a4eccea558410f2fadc4eaa5eb9ac83684e6de4f177084142724\": container with ID starting with bce8de951878a4eccea558410f2fadc4eaa5eb9ac83684e6de4f177084142724 not found: ID does not exist" containerID="bce8de951878a4eccea558410f2fadc4eaa5eb9ac83684e6de4f177084142724"
Dec 13 06:52:28 crc kubenswrapper[5048]: I1213 06:52:28.724047 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bce8de951878a4eccea558410f2fadc4eaa5eb9ac83684e6de4f177084142724"} err="failed to get container status \"bce8de951878a4eccea558410f2fadc4eaa5eb9ac83684e6de4f177084142724\": rpc error: code = NotFound desc = could not find container \"bce8de951878a4eccea558410f2fadc4eaa5eb9ac83684e6de4f177084142724\": container with ID starting with bce8de951878a4eccea558410f2fadc4eaa5eb9ac83684e6de4f177084142724 not found: ID does not exist"
Dec 13 06:52:28 crc kubenswrapper[5048]: I1213 06:52:28.800560 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4a966382-7a0b-439d-a1f0-9a08ae863aa0-config\") pod \"4a966382-7a0b-439d-a1f0-9a08ae863aa0\" (UID: \"4a966382-7a0b-439d-a1f0-9a08ae863aa0\") "
Dec 13 06:52:28 crc kubenswrapper[5048]: I1213 06:52:28.800634 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4a966382-7a0b-439d-a1f0-9a08ae863aa0-dns-svc\") pod \"4a966382-7a0b-439d-a1f0-9a08ae863aa0\" (UID: \"4a966382-7a0b-439d-a1f0-9a08ae863aa0\") "
Dec 13 06:52:28 crc kubenswrapper[5048]: I1213 06:52:28.800664 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4a966382-7a0b-439d-a1f0-9a08ae863aa0-ovsdbserver-sb\") pod \"4a966382-7a0b-439d-a1f0-9a08ae863aa0\" (UID: \"4a966382-7a0b-439d-a1f0-9a08ae863aa0\") "
Dec 13 06:52:28 crc kubenswrapper[5048]: I1213 06:52:28.800812 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4a966382-7a0b-439d-a1f0-9a08ae863aa0-dns-swift-storage-0\") pod \"4a966382-7a0b-439d-a1f0-9a08ae863aa0\" (UID: \"4a966382-7a0b-439d-a1f0-9a08ae863aa0\") "
Dec 13 06:52:28 crc kubenswrapper[5048]: I1213 06:52:28.800900 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wvlvl\" (UniqueName: \"kubernetes.io/projected/4a966382-7a0b-439d-a1f0-9a08ae863aa0-kube-api-access-wvlvl\") pod \"4a966382-7a0b-439d-a1f0-9a08ae863aa0\" (UID: \"4a966382-7a0b-439d-a1f0-9a08ae863aa0\") "
Dec 13 06:52:28 crc kubenswrapper[5048]: I1213 06:52:28.801039 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4a966382-7a0b-439d-a1f0-9a08ae863aa0-ovsdbserver-nb\") pod \"4a966382-7a0b-439d-a1f0-9a08ae863aa0\" (UID: \"4a966382-7a0b-439d-a1f0-9a08ae863aa0\") "
Dec 13 06:52:28 crc kubenswrapper[5048]: I1213 06:52:28.815702 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a966382-7a0b-439d-a1f0-9a08ae863aa0-kube-api-access-wvlvl" (OuterVolumeSpecName: "kube-api-access-wvlvl") pod "4a966382-7a0b-439d-a1f0-9a08ae863aa0" (UID: "4a966382-7a0b-439d-a1f0-9a08ae863aa0"). InnerVolumeSpecName "kube-api-access-wvlvl". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:52:28 crc kubenswrapper[5048]: I1213 06:52:28.879099 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4a966382-7a0b-439d-a1f0-9a08ae863aa0-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "4a966382-7a0b-439d-a1f0-9a08ae863aa0" (UID: "4a966382-7a0b-439d-a1f0-9a08ae863aa0"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:52:28 crc kubenswrapper[5048]: I1213 06:52:28.898926 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4a966382-7a0b-439d-a1f0-9a08ae863aa0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4a966382-7a0b-439d-a1f0-9a08ae863aa0" (UID: "4a966382-7a0b-439d-a1f0-9a08ae863aa0"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:52:28 crc kubenswrapper[5048]: I1213 06:52:28.905639 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4a966382-7a0b-439d-a1f0-9a08ae863aa0-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:52:28 crc kubenswrapper[5048]: I1213 06:52:28.905670 5048 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4a966382-7a0b-439d-a1f0-9a08ae863aa0-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 13 06:52:28 crc kubenswrapper[5048]: I1213 06:52:28.905680 5048 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4a966382-7a0b-439d-a1f0-9a08ae863aa0-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 13 06:52:28 crc kubenswrapper[5048]: I1213 06:52:28.905690 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wvlvl\" (UniqueName: \"kubernetes.io/projected/4a966382-7a0b-439d-a1f0-9a08ae863aa0-kube-api-access-wvlvl\") on node \"crc\" DevicePath \"\"" Dec 13 06:52:28 crc kubenswrapper[5048]: I1213 06:52:28.909816 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4a966382-7a0b-439d-a1f0-9a08ae863aa0-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "4a966382-7a0b-439d-a1f0-9a08ae863aa0" (UID: "4a966382-7a0b-439d-a1f0-9a08ae863aa0"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:52:28 crc kubenswrapper[5048]: I1213 06:52:28.912915 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4a966382-7a0b-439d-a1f0-9a08ae863aa0-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "4a966382-7a0b-439d-a1f0-9a08ae863aa0" (UID: "4a966382-7a0b-439d-a1f0-9a08ae863aa0"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:52:29 crc kubenswrapper[5048]: I1213 06:52:29.007854 5048 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4a966382-7a0b-439d-a1f0-9a08ae863aa0-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 13 06:52:29 crc kubenswrapper[5048]: I1213 06:52:29.007894 5048 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4a966382-7a0b-439d-a1f0-9a08ae863aa0-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 13 06:52:29 crc kubenswrapper[5048]: I1213 06:52:29.014146 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-mqjdq"] Dec 13 06:52:29 crc kubenswrapper[5048]: I1213 06:52:29.022677 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-mqjdq"] Dec 13 06:52:30 crc kubenswrapper[5048]: I1213 06:52:30.578721 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4a966382-7a0b-439d-a1f0-9a08ae863aa0" path="/var/lib/kubelet/pods/4a966382-7a0b-439d-a1f0-9a08ae863aa0/volumes" Dec 13 06:52:30 crc kubenswrapper[5048]: I1213 06:52:30.714416 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a6d85513-7b75-40e7-9eae-08544cccbc55","Type":"ContainerStarted","Data":"1d1d932a4869080bce570ff9166c53c45e57dc776f4605b72eed4336a31debd7"} Dec 13 06:52:30 crc kubenswrapper[5048]: I1213 06:52:30.715291 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 13 06:52:30 crc kubenswrapper[5048]: I1213 06:52:30.738948 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.383219944 podStartE2EDuration="5.738931798s" podCreationTimestamp="2025-12-13 06:52:25 +0000 UTC" firstStartedPulling="2025-12-13 06:52:26.150126984 +0000 UTC m=+1380.016721555" lastFinishedPulling="2025-12-13 06:52:29.505838828 +0000 UTC m=+1383.372433409" observedRunningTime="2025-12-13 06:52:30.737388356 +0000 UTC m=+1384.603982957" watchObservedRunningTime="2025-12-13 06:52:30.738931798 +0000 UTC m=+1384.605526369" Dec 13 06:52:31 crc kubenswrapper[5048]: I1213 06:52:31.727373 5048 generic.go:334] "Generic (PLEG): container finished" podID="1b83ee75-f7b6-4c4e-9439-49b4646efc16" containerID="612298789e3bc2581412496aaca87fcd6bd3d941aa5ac33cc9659c35ad3ee290" exitCode=0 Dec 13 06:52:31 crc kubenswrapper[5048]: I1213 06:52:31.727746 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-r4vpp" event={"ID":"1b83ee75-f7b6-4c4e-9439-49b4646efc16","Type":"ContainerDied","Data":"612298789e3bc2581412496aaca87fcd6bd3d941aa5ac33cc9659c35ad3ee290"} Dec 13 06:52:33 crc kubenswrapper[5048]: I1213 06:52:33.062577 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-r4vpp" Dec 13 06:52:33 crc kubenswrapper[5048]: I1213 06:52:33.209417 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ghm5t\" (UniqueName: \"kubernetes.io/projected/1b83ee75-f7b6-4c4e-9439-49b4646efc16-kube-api-access-ghm5t\") pod \"1b83ee75-f7b6-4c4e-9439-49b4646efc16\" (UID: \"1b83ee75-f7b6-4c4e-9439-49b4646efc16\") " Dec 13 06:52:33 crc kubenswrapper[5048]: I1213 06:52:33.209501 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b83ee75-f7b6-4c4e-9439-49b4646efc16-config-data\") pod \"1b83ee75-f7b6-4c4e-9439-49b4646efc16\" (UID: \"1b83ee75-f7b6-4c4e-9439-49b4646efc16\") " Dec 13 06:52:33 crc kubenswrapper[5048]: I1213 06:52:33.209653 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1b83ee75-f7b6-4c4e-9439-49b4646efc16-scripts\") pod \"1b83ee75-f7b6-4c4e-9439-49b4646efc16\" (UID: \"1b83ee75-f7b6-4c4e-9439-49b4646efc16\") " Dec 13 06:52:33 crc kubenswrapper[5048]: I1213 06:52:33.209743 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b83ee75-f7b6-4c4e-9439-49b4646efc16-combined-ca-bundle\") pod \"1b83ee75-f7b6-4c4e-9439-49b4646efc16\" (UID: \"1b83ee75-f7b6-4c4e-9439-49b4646efc16\") " Dec 13 06:52:33 crc kubenswrapper[5048]: I1213 06:52:33.215678 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b83ee75-f7b6-4c4e-9439-49b4646efc16-kube-api-access-ghm5t" (OuterVolumeSpecName: "kube-api-access-ghm5t") pod "1b83ee75-f7b6-4c4e-9439-49b4646efc16" (UID: "1b83ee75-f7b6-4c4e-9439-49b4646efc16"). InnerVolumeSpecName "kube-api-access-ghm5t". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:52:33 crc kubenswrapper[5048]: I1213 06:52:33.219699 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b83ee75-f7b6-4c4e-9439-49b4646efc16-scripts" (OuterVolumeSpecName: "scripts") pod "1b83ee75-f7b6-4c4e-9439-49b4646efc16" (UID: "1b83ee75-f7b6-4c4e-9439-49b4646efc16"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:52:33 crc kubenswrapper[5048]: I1213 06:52:33.243733 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b83ee75-f7b6-4c4e-9439-49b4646efc16-config-data" (OuterVolumeSpecName: "config-data") pod "1b83ee75-f7b6-4c4e-9439-49b4646efc16" (UID: "1b83ee75-f7b6-4c4e-9439-49b4646efc16"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:52:33 crc kubenswrapper[5048]: I1213 06:52:33.264031 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b83ee75-f7b6-4c4e-9439-49b4646efc16-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1b83ee75-f7b6-4c4e-9439-49b4646efc16" (UID: "1b83ee75-f7b6-4c4e-9439-49b4646efc16"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:52:33 crc kubenswrapper[5048]: I1213 06:52:33.312285 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ghm5t\" (UniqueName: \"kubernetes.io/projected/1b83ee75-f7b6-4c4e-9439-49b4646efc16-kube-api-access-ghm5t\") on node \"crc\" DevicePath \"\"" Dec 13 06:52:33 crc kubenswrapper[5048]: I1213 06:52:33.312327 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b83ee75-f7b6-4c4e-9439-49b4646efc16-config-data\") on node \"crc\" DevicePath \"\"" Dec 13 06:52:33 crc kubenswrapper[5048]: I1213 06:52:33.312340 5048 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1b83ee75-f7b6-4c4e-9439-49b4646efc16-scripts\") on node \"crc\" DevicePath \"\"" Dec 13 06:52:33 crc kubenswrapper[5048]: I1213 06:52:33.312350 5048 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b83ee75-f7b6-4c4e-9439-49b4646efc16-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 06:52:33 crc kubenswrapper[5048]: I1213 06:52:33.395629 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-bccf8f775-mqjdq" podUID="4a966382-7a0b-439d-a1f0-9a08ae863aa0" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.187:5353: i/o timeout" Dec 13 06:52:33 crc kubenswrapper[5048]: I1213 06:52:33.755818 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-r4vpp" event={"ID":"1b83ee75-f7b6-4c4e-9439-49b4646efc16","Type":"ContainerDied","Data":"2bda63b61f0bf514cd5b9ff45bd4ef0f5cccbc9b43adb96e92cb8d47718fc5c4"} Dec 13 06:52:33 crc kubenswrapper[5048]: I1213 06:52:33.755955 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2bda63b61f0bf514cd5b9ff45bd4ef0f5cccbc9b43adb96e92cb8d47718fc5c4" Dec 13 06:52:33 crc kubenswrapper[5048]: I1213 06:52:33.755934 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-r4vpp" Dec 13 06:52:33 crc kubenswrapper[5048]: I1213 06:52:33.947627 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 13 06:52:33 crc kubenswrapper[5048]: I1213 06:52:33.947931 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="49f372c6-68f7-45d2-a838-6336bb065fec" containerName="nova-api-log" containerID="cri-o://3a1104e49fd9c5c4be36287993036865ad21cdbff9828a32eb5b9c3634a1c995" gracePeriod=30 Dec 13 06:52:33 crc kubenswrapper[5048]: I1213 06:52:33.948362 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="49f372c6-68f7-45d2-a838-6336bb065fec" containerName="nova-api-api" containerID="cri-o://816af06a0232627aa1f52fcb2dff1e1d1070cd69a89704972680e5d158b2bb30" gracePeriod=30 Dec 13 06:52:33 crc kubenswrapper[5048]: I1213 06:52:33.956727 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Dec 13 06:52:33 crc kubenswrapper[5048]: I1213 06:52:33.956979 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="affd50c3-a8a2-4925-a7b5-5e6836c0c8cf" containerName="nova-scheduler-scheduler" containerID="cri-o://be559a913b705ae5860ff331bbc9ceda2812f21d11c631de2d2e078ab8e853b9" gracePeriod=30 Dec 13 06:52:34 crc kubenswrapper[5048]: I1213 06:52:34.024095 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Dec 13 06:52:34 crc kubenswrapper[5048]: I1213 06:52:34.024466 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="074ecf6d-bc26-46eb-8cf0-ffcf4612a773" containerName="nova-metadata-log" containerID="cri-o://e5a1dd28b1e0344a486750bb5885047e1a5a633bb8ad17100e85358e976cb9f8" gracePeriod=30 Dec 13 06:52:34 crc kubenswrapper[5048]: I1213 06:52:34.024462 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="074ecf6d-bc26-46eb-8cf0-ffcf4612a773" containerName="nova-metadata-metadata" containerID="cri-o://f0a995fa6063730169e9bc8f6bd76c3aa3fdfc5b0641b5488c561c19736e2f87" gracePeriod=30 Dec 13 06:52:34 crc kubenswrapper[5048]: I1213 06:52:34.781255 5048 generic.go:334] "Generic (PLEG): container finished" podID="49f372c6-68f7-45d2-a838-6336bb065fec" containerID="816af06a0232627aa1f52fcb2dff1e1d1070cd69a89704972680e5d158b2bb30" exitCode=0 Dec 13 06:52:34 crc kubenswrapper[5048]: I1213 06:52:34.781648 5048 generic.go:334] "Generic (PLEG): container finished" podID="49f372c6-68f7-45d2-a838-6336bb065fec" containerID="3a1104e49fd9c5c4be36287993036865ad21cdbff9828a32eb5b9c3634a1c995" exitCode=143 Dec 13 06:52:34 crc kubenswrapper[5048]: I1213 06:52:34.781487 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"49f372c6-68f7-45d2-a838-6336bb065fec","Type":"ContainerDied","Data":"816af06a0232627aa1f52fcb2dff1e1d1070cd69a89704972680e5d158b2bb30"} Dec 13 06:52:34 crc kubenswrapper[5048]: I1213 06:52:34.781733 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"49f372c6-68f7-45d2-a838-6336bb065fec","Type":"ContainerDied","Data":"3a1104e49fd9c5c4be36287993036865ad21cdbff9828a32eb5b9c3634a1c995"} Dec 13 06:52:34 crc kubenswrapper[5048]: I1213 06:52:34.785426 5048 generic.go:334] "Generic (PLEG): container finished" podID="074ecf6d-bc26-46eb-8cf0-ffcf4612a773" 
containerID="e5a1dd28b1e0344a486750bb5885047e1a5a633bb8ad17100e85358e976cb9f8" exitCode=143 Dec 13 06:52:34 crc kubenswrapper[5048]: I1213 06:52:34.785472 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"074ecf6d-bc26-46eb-8cf0-ffcf4612a773","Type":"ContainerDied","Data":"e5a1dd28b1e0344a486750bb5885047e1a5a633bb8ad17100e85358e976cb9f8"} Dec 13 06:52:34 crc kubenswrapper[5048]: I1213 06:52:34.928261 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.041507 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49f372c6-68f7-45d2-a838-6336bb065fec-combined-ca-bundle\") pod \"49f372c6-68f7-45d2-a838-6336bb065fec\" (UID: \"49f372c6-68f7-45d2-a838-6336bb065fec\") " Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.041589 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49f372c6-68f7-45d2-a838-6336bb065fec-config-data\") pod \"49f372c6-68f7-45d2-a838-6336bb065fec\" (UID: \"49f372c6-68f7-45d2-a838-6336bb065fec\") " Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.041674 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/49f372c6-68f7-45d2-a838-6336bb065fec-public-tls-certs\") pod \"49f372c6-68f7-45d2-a838-6336bb065fec\" (UID: \"49f372c6-68f7-45d2-a838-6336bb065fec\") " Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.041790 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49f372c6-68f7-45d2-a838-6336bb065fec-logs\") pod \"49f372c6-68f7-45d2-a838-6336bb065fec\" (UID: \"49f372c6-68f7-45d2-a838-6336bb065fec\") " Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.041894 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzkww\" (UniqueName: \"kubernetes.io/projected/49f372c6-68f7-45d2-a838-6336bb065fec-kube-api-access-lzkww\") pod \"49f372c6-68f7-45d2-a838-6336bb065fec\" (UID: \"49f372c6-68f7-45d2-a838-6336bb065fec\") " Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.041930 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/49f372c6-68f7-45d2-a838-6336bb065fec-internal-tls-certs\") pod \"49f372c6-68f7-45d2-a838-6336bb065fec\" (UID: \"49f372c6-68f7-45d2-a838-6336bb065fec\") " Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.042701 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/49f372c6-68f7-45d2-a838-6336bb065fec-logs" (OuterVolumeSpecName: "logs") pod "49f372c6-68f7-45d2-a838-6336bb065fec" (UID: "49f372c6-68f7-45d2-a838-6336bb065fec"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.052595 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49f372c6-68f7-45d2-a838-6336bb065fec-kube-api-access-lzkww" (OuterVolumeSpecName: "kube-api-access-lzkww") pod "49f372c6-68f7-45d2-a838-6336bb065fec" (UID: "49f372c6-68f7-45d2-a838-6336bb065fec"). InnerVolumeSpecName "kube-api-access-lzkww". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.091198 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49f372c6-68f7-45d2-a838-6336bb065fec-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "49f372c6-68f7-45d2-a838-6336bb065fec" (UID: "49f372c6-68f7-45d2-a838-6336bb065fec"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.097973 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49f372c6-68f7-45d2-a838-6336bb065fec-config-data" (OuterVolumeSpecName: "config-data") pod "49f372c6-68f7-45d2-a838-6336bb065fec" (UID: "49f372c6-68f7-45d2-a838-6336bb065fec"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.144735 5048 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49f372c6-68f7-45d2-a838-6336bb065fec-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.144774 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49f372c6-68f7-45d2-a838-6336bb065fec-config-data\") on node \"crc\" DevicePath \"\"" Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.144788 5048 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49f372c6-68f7-45d2-a838-6336bb065fec-logs\") on node \"crc\" DevicePath \"\"" Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.144800 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzkww\" (UniqueName: \"kubernetes.io/projected/49f372c6-68f7-45d2-a838-6336bb065fec-kube-api-access-lzkww\") on node \"crc\" DevicePath \"\"" Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.155344 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49f372c6-68f7-45d2-a838-6336bb065fec-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "49f372c6-68f7-45d2-a838-6336bb065fec" (UID: "49f372c6-68f7-45d2-a838-6336bb065fec"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.188703 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49f372c6-68f7-45d2-a838-6336bb065fec-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "49f372c6-68f7-45d2-a838-6336bb065fec" (UID: "49f372c6-68f7-45d2-a838-6336bb065fec"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.252047 5048 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/49f372c6-68f7-45d2-a838-6336bb065fec-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.252081 5048 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/49f372c6-68f7-45d2-a838-6336bb065fec-public-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.301887 5048 util.go:48] "No ready sandbox for pod can be found. 
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.353275 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xj2xg\" (UniqueName: \"kubernetes.io/projected/affd50c3-a8a2-4925-a7b5-5e6836c0c8cf-kube-api-access-xj2xg\") pod \"affd50c3-a8a2-4925-a7b5-5e6836c0c8cf\" (UID: \"affd50c3-a8a2-4925-a7b5-5e6836c0c8cf\") "
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.353816 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/affd50c3-a8a2-4925-a7b5-5e6836c0c8cf-config-data\") pod \"affd50c3-a8a2-4925-a7b5-5e6836c0c8cf\" (UID: \"affd50c3-a8a2-4925-a7b5-5e6836c0c8cf\") "
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.353915 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/affd50c3-a8a2-4925-a7b5-5e6836c0c8cf-combined-ca-bundle\") pod \"affd50c3-a8a2-4925-a7b5-5e6836c0c8cf\" (UID: \"affd50c3-a8a2-4925-a7b5-5e6836c0c8cf\") "
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.358633 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/affd50c3-a8a2-4925-a7b5-5e6836c0c8cf-kube-api-access-xj2xg" (OuterVolumeSpecName: "kube-api-access-xj2xg") pod "affd50c3-a8a2-4925-a7b5-5e6836c0c8cf" (UID: "affd50c3-a8a2-4925-a7b5-5e6836c0c8cf"). InnerVolumeSpecName "kube-api-access-xj2xg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.403211 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/affd50c3-a8a2-4925-a7b5-5e6836c0c8cf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "affd50c3-a8a2-4925-a7b5-5e6836c0c8cf" (UID: "affd50c3-a8a2-4925-a7b5-5e6836c0c8cf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.420758 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/affd50c3-a8a2-4925-a7b5-5e6836c0c8cf-config-data" (OuterVolumeSpecName: "config-data") pod "affd50c3-a8a2-4925-a7b5-5e6836c0c8cf" (UID: "affd50c3-a8a2-4925-a7b5-5e6836c0c8cf"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.456840 5048 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/affd50c3-a8a2-4925-a7b5-5e6836c0c8cf-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.456886 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xj2xg\" (UniqueName: \"kubernetes.io/projected/affd50c3-a8a2-4925-a7b5-5e6836c0c8cf-kube-api-access-xj2xg\") on node \"crc\" DevicePath \"\""
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.456900 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/affd50c3-a8a2-4925-a7b5-5e6836c0c8cf-config-data\") on node \"crc\" DevicePath \"\""
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.795356 5048 generic.go:334] "Generic (PLEG): container finished" podID="affd50c3-a8a2-4925-a7b5-5e6836c0c8cf" containerID="be559a913b705ae5860ff331bbc9ceda2812f21d11c631de2d2e078ab8e853b9" exitCode=0
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.795559 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"affd50c3-a8a2-4925-a7b5-5e6836c0c8cf","Type":"ContainerDied","Data":"be559a913b705ae5860ff331bbc9ceda2812f21d11c631de2d2e078ab8e853b9"}
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.795774 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"affd50c3-a8a2-4925-a7b5-5e6836c0c8cf","Type":"ContainerDied","Data":"c5aa11a9598e78f909b47a0fa54418d9c4dd084c09a6deccdc4c4f3b51149051"}
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.795619 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.795796 5048 scope.go:117] "RemoveContainer" containerID="be559a913b705ae5860ff331bbc9ceda2812f21d11c631de2d2e078ab8e853b9"
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.799376 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"49f372c6-68f7-45d2-a838-6336bb065fec","Type":"ContainerDied","Data":"f3809015b7aefcbd35c4a7ce0fc826d9bd6f31ad8312d76cf28379e2f164b4d8"}
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.799493 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.819020 5048 scope.go:117] "RemoveContainer" containerID="be559a913b705ae5860ff331bbc9ceda2812f21d11c631de2d2e078ab8e853b9"
Dec 13 06:52:35 crc kubenswrapper[5048]: E1213 06:52:35.819548 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be559a913b705ae5860ff331bbc9ceda2812f21d11c631de2d2e078ab8e853b9\": container with ID starting with be559a913b705ae5860ff331bbc9ceda2812f21d11c631de2d2e078ab8e853b9 not found: ID does not exist" containerID="be559a913b705ae5860ff331bbc9ceda2812f21d11c631de2d2e078ab8e853b9"
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.819607 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be559a913b705ae5860ff331bbc9ceda2812f21d11c631de2d2e078ab8e853b9"} err="failed to get container status \"be559a913b705ae5860ff331bbc9ceda2812f21d11c631de2d2e078ab8e853b9\": rpc error: code = NotFound desc = could not find container \"be559a913b705ae5860ff331bbc9ceda2812f21d11c631de2d2e078ab8e853b9\": container with ID starting with be559a913b705ae5860ff331bbc9ceda2812f21d11c631de2d2e078ab8e853b9 not found: ID does not exist"
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.819643 5048 scope.go:117] "RemoveContainer" containerID="816af06a0232627aa1f52fcb2dff1e1d1070cd69a89704972680e5d158b2bb30"
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.843602 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.858360 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"]
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.860017 5048 scope.go:117] "RemoveContainer" containerID="3a1104e49fd9c5c4be36287993036865ad21cdbff9828a32eb5b9c3634a1c995"
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.868641 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.886152 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Dec 13 06:52:35 crc kubenswrapper[5048]: E1213 06:52:35.886856 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49f372c6-68f7-45d2-a838-6336bb065fec" containerName="nova-api-log"
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.886883 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="49f372c6-68f7-45d2-a838-6336bb065fec" containerName="nova-api-log"
Dec 13 06:52:35 crc kubenswrapper[5048]: E1213 06:52:35.886906 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b83ee75-f7b6-4c4e-9439-49b4646efc16" containerName="nova-manage"
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.886913 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b83ee75-f7b6-4c4e-9439-49b4646efc16" containerName="nova-manage"
Dec 13 06:52:35 crc kubenswrapper[5048]: E1213 06:52:35.886933 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="affd50c3-a8a2-4925-a7b5-5e6836c0c8cf" containerName="nova-scheduler-scheduler"
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.886941 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="affd50c3-a8a2-4925-a7b5-5e6836c0c8cf" containerName="nova-scheduler-scheduler"
Dec 13 06:52:35 crc kubenswrapper[5048]: E1213 06:52:35.886960 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a966382-7a0b-439d-a1f0-9a08ae863aa0" containerName="dnsmasq-dns"
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.886967 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a966382-7a0b-439d-a1f0-9a08ae863aa0" containerName="dnsmasq-dns"
Dec 13 06:52:35 crc kubenswrapper[5048]: E1213 06:52:35.886979 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a966382-7a0b-439d-a1f0-9a08ae863aa0" containerName="init"
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.886986 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a966382-7a0b-439d-a1f0-9a08ae863aa0" containerName="init"
Dec 13 06:52:35 crc kubenswrapper[5048]: E1213 06:52:35.886999 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49f372c6-68f7-45d2-a838-6336bb065fec" containerName="nova-api-api"
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.887005 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="49f372c6-68f7-45d2-a838-6336bb065fec" containerName="nova-api-api"
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.887223 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="49f372c6-68f7-45d2-a838-6336bb065fec" containerName="nova-api-log"
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.887245 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="49f372c6-68f7-45d2-a838-6336bb065fec" containerName="nova-api-api"
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.887261 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b83ee75-f7b6-4c4e-9439-49b4646efc16" containerName="nova-manage"
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.887283 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a966382-7a0b-439d-a1f0-9a08ae863aa0" containerName="dnsmasq-dns"
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.887296 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="affd50c3-a8a2-4925-a7b5-5e6836c0c8cf" containerName="nova-scheduler-scheduler"
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.888001 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.898634 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.912422 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.912867 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.931384 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.948654 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.954530 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.954758 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc"
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.954952 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc"
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.960224 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.968070 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf17e7ba-3f68-4aca-ae8e-fb82f10f18d4-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"bf17e7ba-3f68-4aca-ae8e-fb82f10f18d4\") " pod="openstack/nova-scheduler-0"
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.968381 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/40f60c37-7e8e-4a30-9d38-26296975a60c-logs\") pod \"nova-api-0\" (UID: \"40f60c37-7e8e-4a30-9d38-26296975a60c\") " pod="openstack/nova-api-0"
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.968410 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9n4jt\" (UniqueName: \"kubernetes.io/projected/40f60c37-7e8e-4a30-9d38-26296975a60c-kube-api-access-9n4jt\") pod \"nova-api-0\" (UID: \"40f60c37-7e8e-4a30-9d38-26296975a60c\") " pod="openstack/nova-api-0"
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.968455 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40f60c37-7e8e-4a30-9d38-26296975a60c-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"40f60c37-7e8e-4a30-9d38-26296975a60c\") " pod="openstack/nova-api-0"
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.968494 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/40f60c37-7e8e-4a30-9d38-26296975a60c-public-tls-certs\") pod \"nova-api-0\" (UID: \"40f60c37-7e8e-4a30-9d38-26296975a60c\") " pod="openstack/nova-api-0"
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.968550 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/40f60c37-7e8e-4a30-9d38-26296975a60c-internal-tls-certs\") pod \"nova-api-0\" (UID: \"40f60c37-7e8e-4a30-9d38-26296975a60c\") " pod="openstack/nova-api-0"
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.968573 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf17e7ba-3f68-4aca-ae8e-fb82f10f18d4-config-data\") pod \"nova-scheduler-0\" (UID: \"bf17e7ba-3f68-4aca-ae8e-fb82f10f18d4\") " pod="openstack/nova-scheduler-0"
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.968643 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40f60c37-7e8e-4a30-9d38-26296975a60c-config-data\") pod \"nova-api-0\" (UID: \"40f60c37-7e8e-4a30-9d38-26296975a60c\") " pod="openstack/nova-api-0"
Dec 13 06:52:35 crc kubenswrapper[5048]: I1213 06:52:35.968675 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x2zm8\" (UniqueName: \"kubernetes.io/projected/bf17e7ba-3f68-4aca-ae8e-fb82f10f18d4-kube-api-access-x2zm8\") pod \"nova-scheduler-0\" (UID: \"bf17e7ba-3f68-4aca-ae8e-fb82f10f18d4\") " pod="openstack/nova-scheduler-0"
Dec 13 06:52:36 crc kubenswrapper[5048]: I1213 06:52:36.070569 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf17e7ba-3f68-4aca-ae8e-fb82f10f18d4-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"bf17e7ba-3f68-4aca-ae8e-fb82f10f18d4\") " pod="openstack/nova-scheduler-0"
Dec 13 06:52:36 crc kubenswrapper[5048]: I1213 06:52:36.070740 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/40f60c37-7e8e-4a30-9d38-26296975a60c-logs\") pod \"nova-api-0\" (UID: \"40f60c37-7e8e-4a30-9d38-26296975a60c\") " pod="openstack/nova-api-0"
Dec 13 06:52:36 crc kubenswrapper[5048]: I1213 06:52:36.070771 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9n4jt\" (UniqueName: \"kubernetes.io/projected/40f60c37-7e8e-4a30-9d38-26296975a60c-kube-api-access-9n4jt\") pod \"nova-api-0\" (UID: \"40f60c37-7e8e-4a30-9d38-26296975a60c\") " pod="openstack/nova-api-0"
Dec 13 06:52:36 crc kubenswrapper[5048]: I1213 06:52:36.070799 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40f60c37-7e8e-4a30-9d38-26296975a60c-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"40f60c37-7e8e-4a30-9d38-26296975a60c\") " pod="openstack/nova-api-0"
Dec 13 06:52:36 crc kubenswrapper[5048]: I1213 06:52:36.070825 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/40f60c37-7e8e-4a30-9d38-26296975a60c-public-tls-certs\") pod \"nova-api-0\" (UID: \"40f60c37-7e8e-4a30-9d38-26296975a60c\") " pod="openstack/nova-api-0"
Dec 13 06:52:36 crc kubenswrapper[5048]: I1213 06:52:36.070870 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/40f60c37-7e8e-4a30-9d38-26296975a60c-internal-tls-certs\") pod \"nova-api-0\" (UID: \"40f60c37-7e8e-4a30-9d38-26296975a60c\") " pod="openstack/nova-api-0"
Dec 13 06:52:36 crc kubenswrapper[5048]: I1213 06:52:36.070891 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf17e7ba-3f68-4aca-ae8e-fb82f10f18d4-config-data\") pod \"nova-scheduler-0\" (UID: \"bf17e7ba-3f68-4aca-ae8e-fb82f10f18d4\") " pod="openstack/nova-scheduler-0"
Dec 13 06:52:36 crc kubenswrapper[5048]: I1213 06:52:36.070938 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40f60c37-7e8e-4a30-9d38-26296975a60c-config-data\") pod \"nova-api-0\" (UID: \"40f60c37-7e8e-4a30-9d38-26296975a60c\") " pod="openstack/nova-api-0"
Dec 13 06:52:36 crc kubenswrapper[5048]: I1213 06:52:36.070968 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x2zm8\" (UniqueName: \"kubernetes.io/projected/bf17e7ba-3f68-4aca-ae8e-fb82f10f18d4-kube-api-access-x2zm8\") pod \"nova-scheduler-0\" (UID: \"bf17e7ba-3f68-4aca-ae8e-fb82f10f18d4\") " pod="openstack/nova-scheduler-0"
Dec 13 06:52:36 crc kubenswrapper[5048]: I1213 06:52:36.071381 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/40f60c37-7e8e-4a30-9d38-26296975a60c-logs\") pod \"nova-api-0\" (UID: \"40f60c37-7e8e-4a30-9d38-26296975a60c\") " pod="openstack/nova-api-0"
Dec 13 06:52:36 crc kubenswrapper[5048]: I1213 06:52:36.076885 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf17e7ba-3f68-4aca-ae8e-fb82f10f18d4-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"bf17e7ba-3f68-4aca-ae8e-fb82f10f18d4\") " pod="openstack/nova-scheduler-0"
Dec 13 06:52:36 crc kubenswrapper[5048]: I1213 06:52:36.077022 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf17e7ba-3f68-4aca-ae8e-fb82f10f18d4-config-data\") pod \"nova-scheduler-0\" (UID: \"bf17e7ba-3f68-4aca-ae8e-fb82f10f18d4\") " pod="openstack/nova-scheduler-0"
Dec 13 06:52:36 crc kubenswrapper[5048]: I1213 06:52:36.077372 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40f60c37-7e8e-4a30-9d38-26296975a60c-config-data\") pod \"nova-api-0\" (UID: \"40f60c37-7e8e-4a30-9d38-26296975a60c\") " pod="openstack/nova-api-0"
Dec 13 06:52:36 crc kubenswrapper[5048]: I1213 06:52:36.077665 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/40f60c37-7e8e-4a30-9d38-26296975a60c-public-tls-certs\") pod \"nova-api-0\" (UID: \"40f60c37-7e8e-4a30-9d38-26296975a60c\") " pod="openstack/nova-api-0"
Dec 13 06:52:36 crc kubenswrapper[5048]: I1213 06:52:36.078265 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/40f60c37-7e8e-4a30-9d38-26296975a60c-internal-tls-certs\") pod \"nova-api-0\" (UID: \"40f60c37-7e8e-4a30-9d38-26296975a60c\") " pod="openstack/nova-api-0"
Dec 13 06:52:36 crc kubenswrapper[5048]: I1213 06:52:36.087729 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x2zm8\" (UniqueName: \"kubernetes.io/projected/bf17e7ba-3f68-4aca-ae8e-fb82f10f18d4-kube-api-access-x2zm8\") pod \"nova-scheduler-0\" (UID: \"bf17e7ba-3f68-4aca-ae8e-fb82f10f18d4\") " pod="openstack/nova-scheduler-0"
Dec 13 06:52:36 crc kubenswrapper[5048]: I1213 06:52:36.090129 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40f60c37-7e8e-4a30-9d38-26296975a60c-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"40f60c37-7e8e-4a30-9d38-26296975a60c\") " pod="openstack/nova-api-0"
Dec 13 06:52:36 crc kubenswrapper[5048]: I1213 06:52:36.093339 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9n4jt\" (UniqueName: \"kubernetes.io/projected/40f60c37-7e8e-4a30-9d38-26296975a60c-kube-api-access-9n4jt\") pod \"nova-api-0\" (UID: \"40f60c37-7e8e-4a30-9d38-26296975a60c\") " pod="openstack/nova-api-0"
Dec 13 06:52:36 crc kubenswrapper[5048]: I1213 06:52:36.228338 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Dec 13 06:52:36 crc kubenswrapper[5048]: I1213 06:52:36.266196 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Dec 13 06:52:36 crc kubenswrapper[5048]: I1213 06:52:36.577807 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49f372c6-68f7-45d2-a838-6336bb065fec" path="/var/lib/kubelet/pods/49f372c6-68f7-45d2-a838-6336bb065fec/volumes"
Dec 13 06:52:36 crc kubenswrapper[5048]: I1213 06:52:36.579033 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="affd50c3-a8a2-4925-a7b5-5e6836c0c8cf" path="/var/lib/kubelet/pods/affd50c3-a8a2-4925-a7b5-5e6836c0c8cf/volumes"
Dec 13 06:52:36 crc kubenswrapper[5048]: W1213 06:52:36.710598 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbf17e7ba_3f68_4aca_ae8e_fb82f10f18d4.slice/crio-eba23e83d60fc5445fe6252716b2bb7a3c1d6d9fc818cdd90b41ac5ad678f060 WatchSource:0}: Error finding container eba23e83d60fc5445fe6252716b2bb7a3c1d6d9fc818cdd90b41ac5ad678f060: Status 404 returned error can't find the container with id eba23e83d60fc5445fe6252716b2bb7a3c1d6d9fc818cdd90b41ac5ad678f060
Dec 13 06:52:36 crc kubenswrapper[5048]: I1213 06:52:36.711086 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Dec 13 06:52:36 crc kubenswrapper[5048]: I1213 06:52:36.809902 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Dec 13 06:52:36 crc kubenswrapper[5048]: I1213 06:52:36.811408 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"bf17e7ba-3f68-4aca-ae8e-fb82f10f18d4","Type":"ContainerStarted","Data":"eba23e83d60fc5445fe6252716b2bb7a3c1d6d9fc818cdd90b41ac5ad678f060"}
Dec 13 06:52:36 crc kubenswrapper[5048]: W1213 06:52:36.821666 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod40f60c37_7e8e_4a30_9d38_26296975a60c.slice/crio-951fd33dc6cc9851ad2649e25f76fb593646f4d1cc72b1618c71883d8d3a099c WatchSource:0}: Error finding container 951fd33dc6cc9851ad2649e25f76fb593646f4d1cc72b1618c71883d8d3a099c: Status 404 returned error can't find the container with id 951fd33dc6cc9851ad2649e25f76fb593646f4d1cc72b1618c71883d8d3a099c
Dec 13 06:52:37 crc kubenswrapper[5048]: I1213 06:52:37.804540 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
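[annotation] The two "Failed to process watch event ... Status 404" warnings above look like a startup ordering race rather than a real failure: the cgroup watcher sees the new crio-* slice before the runtime has the container registered, the first lookup 404s, and both pods nonetheless go on to report ContainerStarted moments later. Handling a transient miss of this kind typically reduces to a bounded retry; a generic sketch (stand-in names, not the kubelet's actual handling):

package main

import (
	"errors"
	"fmt"
	"time"
)

var errNotYet = errors.New("can't find the container") // stand-in for the 404

// lookup pretends the container becomes visible on the third attempt.
func lookup(attempt int) error {
	if attempt < 2 {
		return errNotYet
	}
	return nil
}

func main() {
	for attempt := 0; attempt < 5; attempt++ {
		if err := lookup(attempt); err == nil {
			fmt.Println("container found on attempt", attempt)
			return
		}
		time.Sleep(100 * time.Millisecond) // back off before re-checking
	}
	fmt.Println("gave up")
}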
Dec 13 06:52:37 crc kubenswrapper[5048]: I1213 06:52:37.831577 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"40f60c37-7e8e-4a30-9d38-26296975a60c","Type":"ContainerStarted","Data":"22a4672b65128cc9808e961eda75992d3292a06195902eb77afabe0da75ae202"}
Dec 13 06:52:37 crc kubenswrapper[5048]: I1213 06:52:37.831620 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"40f60c37-7e8e-4a30-9d38-26296975a60c","Type":"ContainerStarted","Data":"97a925ccde2b631d771b96892f1f326b015a27a02188a8a57f04581a3aaedda3"}
Dec 13 06:52:37 crc kubenswrapper[5048]: I1213 06:52:37.831629 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"40f60c37-7e8e-4a30-9d38-26296975a60c","Type":"ContainerStarted","Data":"951fd33dc6cc9851ad2649e25f76fb593646f4d1cc72b1618c71883d8d3a099c"}
Dec 13 06:52:37 crc kubenswrapper[5048]: I1213 06:52:37.868165 5048 generic.go:334] "Generic (PLEG): container finished" podID="074ecf6d-bc26-46eb-8cf0-ffcf4612a773" containerID="f0a995fa6063730169e9bc8f6bd76c3aa3fdfc5b0641b5488c561c19736e2f87" exitCode=0
Dec 13 06:52:37 crc kubenswrapper[5048]: I1213 06:52:37.868243 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"074ecf6d-bc26-46eb-8cf0-ffcf4612a773","Type":"ContainerDied","Data":"f0a995fa6063730169e9bc8f6bd76c3aa3fdfc5b0641b5488c561c19736e2f87"}
Dec 13 06:52:37 crc kubenswrapper[5048]: I1213 06:52:37.868275 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"074ecf6d-bc26-46eb-8cf0-ffcf4612a773","Type":"ContainerDied","Data":"f88c9ea72f56a0b932492af1f5da310eed82d55054a320cf63dd4c65262674b5"}
Dec 13 06:52:37 crc kubenswrapper[5048]: I1213 06:52:37.868297 5048 scope.go:117] "RemoveContainer" containerID="f0a995fa6063730169e9bc8f6bd76c3aa3fdfc5b0641b5488c561c19736e2f87"
Dec 13 06:52:37 crc kubenswrapper[5048]: I1213 06:52:37.868331 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Dec 13 06:52:37 crc kubenswrapper[5048]: I1213 06:52:37.871240 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"bf17e7ba-3f68-4aca-ae8e-fb82f10f18d4","Type":"ContainerStarted","Data":"6e8ea9a2160af44957b3aff721109967a1497a6bbea1830b250b778f27722284"}
Dec 13 06:52:37 crc kubenswrapper[5048]: I1213 06:52:37.893873 5048 scope.go:117] "RemoveContainer" containerID="e5a1dd28b1e0344a486750bb5885047e1a5a633bb8ad17100e85358e976cb9f8"
Dec 13 06:52:37 crc kubenswrapper[5048]: I1213 06:52:37.901594 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.901572699 podStartE2EDuration="2.901572699s" podCreationTimestamp="2025-12-13 06:52:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:52:37.890169522 +0000 UTC m=+1391.756764113" watchObservedRunningTime="2025-12-13 06:52:37.901572699 +0000 UTC m=+1391.768167280"
Dec 13 06:52:37 crc kubenswrapper[5048]: I1213 06:52:37.918860 5048 scope.go:117] "RemoveContainer" containerID="f0a995fa6063730169e9bc8f6bd76c3aa3fdfc5b0641b5488c561c19736e2f87"
Dec 13 06:52:37 crc kubenswrapper[5048]: E1213 06:52:37.919467 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f0a995fa6063730169e9bc8f6bd76c3aa3fdfc5b0641b5488c561c19736e2f87\": container with ID starting with f0a995fa6063730169e9bc8f6bd76c3aa3fdfc5b0641b5488c561c19736e2f87 not found: ID does not exist" containerID="f0a995fa6063730169e9bc8f6bd76c3aa3fdfc5b0641b5488c561c19736e2f87"
Dec 13 06:52:37 crc kubenswrapper[5048]: I1213 06:52:37.919498 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f0a995fa6063730169e9bc8f6bd76c3aa3fdfc5b0641b5488c561c19736e2f87"} err="failed to get container status \"f0a995fa6063730169e9bc8f6bd76c3aa3fdfc5b0641b5488c561c19736e2f87\": rpc error: code = NotFound desc = could not find container \"f0a995fa6063730169e9bc8f6bd76c3aa3fdfc5b0641b5488c561c19736e2f87\": container with ID starting with f0a995fa6063730169e9bc8f6bd76c3aa3fdfc5b0641b5488c561c19736e2f87 not found: ID does not exist"
Dec 13 06:52:37 crc kubenswrapper[5048]: I1213 06:52:37.919518 5048 scope.go:117] "RemoveContainer" containerID="e5a1dd28b1e0344a486750bb5885047e1a5a633bb8ad17100e85358e976cb9f8"
Dec 13 06:52:37 crc kubenswrapper[5048]: I1213 06:52:37.920541 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.920530038 podStartE2EDuration="2.920530038s" podCreationTimestamp="2025-12-13 06:52:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:52:37.90607417 +0000 UTC m=+1391.772668751" watchObservedRunningTime="2025-12-13 06:52:37.920530038 +0000 UTC m=+1391.787124619"
Dec 13 06:52:37 crc kubenswrapper[5048]: E1213 06:52:37.920651 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e5a1dd28b1e0344a486750bb5885047e1a5a633bb8ad17100e85358e976cb9f8\": container with ID starting with e5a1dd28b1e0344a486750bb5885047e1a5a633bb8ad17100e85358e976cb9f8 not found: ID does not exist" containerID="e5a1dd28b1e0344a486750bb5885047e1a5a633bb8ad17100e85358e976cb9f8"
Dec 13 06:52:37 crc kubenswrapper[5048]: I1213 06:52:37.920698 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5a1dd28b1e0344a486750bb5885047e1a5a633bb8ad17100e85358e976cb9f8"} err="failed to get container status \"e5a1dd28b1e0344a486750bb5885047e1a5a633bb8ad17100e85358e976cb9f8\": rpc error: code = NotFound desc = could not find container \"e5a1dd28b1e0344a486750bb5885047e1a5a633bb8ad17100e85358e976cb9f8\": container with ID starting with e5a1dd28b1e0344a486750bb5885047e1a5a633bb8ad17100e85358e976cb9f8 not found: ID does not exist"
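[annotation] The podStartSLOduration figures above are plain wall-clock arithmetic: for nova-api-0, watchObservedRunningTime (06:52:37.901572699) minus podCreationTimestamp (06:52:35) is 2.901572699s, and nova-scheduler-0's 2.920530038s checks out the same way. Reproduced with the stdlib time package:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Layout matching the timestamps printed by the tracker records above.
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	created, _ := time.Parse(layout, "2025-12-13 06:52:35 +0000 UTC")
	running, _ := time.Parse(layout, "2025-12-13 06:52:37.901572699 +0000 UTC")
	fmt.Println(running.Sub(created)) // 2.901572699s, matching podStartSLOduration
}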
Dec 13 06:52:37 crc kubenswrapper[5048]: I1213 06:52:37.920997 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hmgwv\" (UniqueName: \"kubernetes.io/projected/074ecf6d-bc26-46eb-8cf0-ffcf4612a773-kube-api-access-hmgwv\") pod \"074ecf6d-bc26-46eb-8cf0-ffcf4612a773\" (UID: \"074ecf6d-bc26-46eb-8cf0-ffcf4612a773\") "
Dec 13 06:52:37 crc kubenswrapper[5048]: I1213 06:52:37.921137 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/074ecf6d-bc26-46eb-8cf0-ffcf4612a773-combined-ca-bundle\") pod \"074ecf6d-bc26-46eb-8cf0-ffcf4612a773\" (UID: \"074ecf6d-bc26-46eb-8cf0-ffcf4612a773\") "
Dec 13 06:52:37 crc kubenswrapper[5048]: I1213 06:52:37.921186 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/074ecf6d-bc26-46eb-8cf0-ffcf4612a773-logs\") pod \"074ecf6d-bc26-46eb-8cf0-ffcf4612a773\" (UID: \"074ecf6d-bc26-46eb-8cf0-ffcf4612a773\") "
Dec 13 06:52:37 crc kubenswrapper[5048]: I1213 06:52:37.921230 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/074ecf6d-bc26-46eb-8cf0-ffcf4612a773-config-data\") pod \"074ecf6d-bc26-46eb-8cf0-ffcf4612a773\" (UID: \"074ecf6d-bc26-46eb-8cf0-ffcf4612a773\") "
Dec 13 06:52:37 crc kubenswrapper[5048]: I1213 06:52:37.921263 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/074ecf6d-bc26-46eb-8cf0-ffcf4612a773-nova-metadata-tls-certs\") pod \"074ecf6d-bc26-46eb-8cf0-ffcf4612a773\" (UID: \"074ecf6d-bc26-46eb-8cf0-ffcf4612a773\") "
Dec 13 06:52:37 crc kubenswrapper[5048]: I1213 06:52:37.921849 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/074ecf6d-bc26-46eb-8cf0-ffcf4612a773-logs" (OuterVolumeSpecName: "logs") pod "074ecf6d-bc26-46eb-8cf0-ffcf4612a773" (UID: "074ecf6d-bc26-46eb-8cf0-ffcf4612a773"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 13 06:52:37 crc kubenswrapper[5048]: I1213 06:52:37.926680 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/074ecf6d-bc26-46eb-8cf0-ffcf4612a773-kube-api-access-hmgwv" (OuterVolumeSpecName: "kube-api-access-hmgwv") pod "074ecf6d-bc26-46eb-8cf0-ffcf4612a773" (UID: "074ecf6d-bc26-46eb-8cf0-ffcf4612a773"). InnerVolumeSpecName "kube-api-access-hmgwv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:52:37 crc kubenswrapper[5048]: I1213 06:52:37.949347 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/074ecf6d-bc26-46eb-8cf0-ffcf4612a773-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "074ecf6d-bc26-46eb-8cf0-ffcf4612a773" (UID: "074ecf6d-bc26-46eb-8cf0-ffcf4612a773"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:52:37 crc kubenswrapper[5048]: I1213 06:52:37.955128 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/074ecf6d-bc26-46eb-8cf0-ffcf4612a773-config-data" (OuterVolumeSpecName: "config-data") pod "074ecf6d-bc26-46eb-8cf0-ffcf4612a773" (UID: "074ecf6d-bc26-46eb-8cf0-ffcf4612a773"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:52:37 crc kubenswrapper[5048]: I1213 06:52:37.971245 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/074ecf6d-bc26-46eb-8cf0-ffcf4612a773-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "074ecf6d-bc26-46eb-8cf0-ffcf4612a773" (UID: "074ecf6d-bc26-46eb-8cf0-ffcf4612a773"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:52:38 crc kubenswrapper[5048]: I1213 06:52:38.023217 5048 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/074ecf6d-bc26-46eb-8cf0-ffcf4612a773-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 13 06:52:38 crc kubenswrapper[5048]: I1213 06:52:38.023257 5048 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/074ecf6d-bc26-46eb-8cf0-ffcf4612a773-logs\") on node \"crc\" DevicePath \"\""
Dec 13 06:52:38 crc kubenswrapper[5048]: I1213 06:52:38.023273 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/074ecf6d-bc26-46eb-8cf0-ffcf4612a773-config-data\") on node \"crc\" DevicePath \"\""
Dec 13 06:52:38 crc kubenswrapper[5048]: I1213 06:52:38.023287 5048 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/074ecf6d-bc26-46eb-8cf0-ffcf4612a773-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\""
Dec 13 06:52:38 crc kubenswrapper[5048]: I1213 06:52:38.023298 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hmgwv\" (UniqueName: \"kubernetes.io/projected/074ecf6d-bc26-46eb-8cf0-ffcf4612a773-kube-api-access-hmgwv\") on node \"crc\" DevicePath \"\""
Dec 13 06:52:38 crc kubenswrapper[5048]: I1213 06:52:38.214976 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Dec 13 06:52:38 crc kubenswrapper[5048]: I1213 06:52:38.232281 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Dec 13 06:52:38 crc kubenswrapper[5048]: I1213 06:52:38.258883 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Dec 13 06:52:38 crc kubenswrapper[5048]: E1213 06:52:38.259337 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="074ecf6d-bc26-46eb-8cf0-ffcf4612a773" containerName="nova-metadata-metadata"
Dec 13 06:52:38 crc kubenswrapper[5048]: I1213 06:52:38.259359 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="074ecf6d-bc26-46eb-8cf0-ffcf4612a773" containerName="nova-metadata-metadata"
podUID="074ecf6d-bc26-46eb-8cf0-ffcf4612a773" containerName="nova-metadata-metadata" Dec 13 06:52:38 crc kubenswrapper[5048]: E1213 06:52:38.259407 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="074ecf6d-bc26-46eb-8cf0-ffcf4612a773" containerName="nova-metadata-log" Dec 13 06:52:38 crc kubenswrapper[5048]: I1213 06:52:38.259417 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="074ecf6d-bc26-46eb-8cf0-ffcf4612a773" containerName="nova-metadata-log" Dec 13 06:52:38 crc kubenswrapper[5048]: I1213 06:52:38.259676 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="074ecf6d-bc26-46eb-8cf0-ffcf4612a773" containerName="nova-metadata-metadata" Dec 13 06:52:38 crc kubenswrapper[5048]: I1213 06:52:38.259705 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="074ecf6d-bc26-46eb-8cf0-ffcf4612a773" containerName="nova-metadata-log" Dec 13 06:52:38 crc kubenswrapper[5048]: I1213 06:52:38.260891 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 13 06:52:38 crc kubenswrapper[5048]: I1213 06:52:38.263300 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Dec 13 06:52:38 crc kubenswrapper[5048]: I1213 06:52:38.263731 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Dec 13 06:52:38 crc kubenswrapper[5048]: I1213 06:52:38.285346 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 13 06:52:38 crc kubenswrapper[5048]: I1213 06:52:38.349494 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9502006-ca4a-4a71-90ef-4f86311c70fc-config-data\") pod \"nova-metadata-0\" (UID: \"b9502006-ca4a-4a71-90ef-4f86311c70fc\") " pod="openstack/nova-metadata-0" Dec 13 06:52:38 crc kubenswrapper[5048]: I1213 06:52:38.349585 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9502006-ca4a-4a71-90ef-4f86311c70fc-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b9502006-ca4a-4a71-90ef-4f86311c70fc\") " pod="openstack/nova-metadata-0" Dec 13 06:52:38 crc kubenswrapper[5048]: I1213 06:52:38.349673 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f9f2f\" (UniqueName: \"kubernetes.io/projected/b9502006-ca4a-4a71-90ef-4f86311c70fc-kube-api-access-f9f2f\") pod \"nova-metadata-0\" (UID: \"b9502006-ca4a-4a71-90ef-4f86311c70fc\") " pod="openstack/nova-metadata-0" Dec 13 06:52:38 crc kubenswrapper[5048]: I1213 06:52:38.349706 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9502006-ca4a-4a71-90ef-4f86311c70fc-logs\") pod \"nova-metadata-0\" (UID: \"b9502006-ca4a-4a71-90ef-4f86311c70fc\") " pod="openstack/nova-metadata-0" Dec 13 06:52:38 crc kubenswrapper[5048]: I1213 06:52:38.349741 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9502006-ca4a-4a71-90ef-4f86311c70fc-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"b9502006-ca4a-4a71-90ef-4f86311c70fc\") " pod="openstack/nova-metadata-0" Dec 13 06:52:38 crc kubenswrapper[5048]: I1213 06:52:38.452305 5048 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f9f2f\" (UniqueName: \"kubernetes.io/projected/b9502006-ca4a-4a71-90ef-4f86311c70fc-kube-api-access-f9f2f\") pod \"nova-metadata-0\" (UID: \"b9502006-ca4a-4a71-90ef-4f86311c70fc\") " pod="openstack/nova-metadata-0" Dec 13 06:52:38 crc kubenswrapper[5048]: I1213 06:52:38.452390 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9502006-ca4a-4a71-90ef-4f86311c70fc-logs\") pod \"nova-metadata-0\" (UID: \"b9502006-ca4a-4a71-90ef-4f86311c70fc\") " pod="openstack/nova-metadata-0" Dec 13 06:52:38 crc kubenswrapper[5048]: I1213 06:52:38.452499 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9502006-ca4a-4a71-90ef-4f86311c70fc-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"b9502006-ca4a-4a71-90ef-4f86311c70fc\") " pod="openstack/nova-metadata-0" Dec 13 06:52:38 crc kubenswrapper[5048]: I1213 06:52:38.452567 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9502006-ca4a-4a71-90ef-4f86311c70fc-config-data\") pod \"nova-metadata-0\" (UID: \"b9502006-ca4a-4a71-90ef-4f86311c70fc\") " pod="openstack/nova-metadata-0" Dec 13 06:52:38 crc kubenswrapper[5048]: I1213 06:52:38.452679 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9502006-ca4a-4a71-90ef-4f86311c70fc-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b9502006-ca4a-4a71-90ef-4f86311c70fc\") " pod="openstack/nova-metadata-0" Dec 13 06:52:38 crc kubenswrapper[5048]: I1213 06:52:38.453311 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9502006-ca4a-4a71-90ef-4f86311c70fc-logs\") pod \"nova-metadata-0\" (UID: \"b9502006-ca4a-4a71-90ef-4f86311c70fc\") " pod="openstack/nova-metadata-0" Dec 13 06:52:38 crc kubenswrapper[5048]: I1213 06:52:38.464201 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9502006-ca4a-4a71-90ef-4f86311c70fc-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"b9502006-ca4a-4a71-90ef-4f86311c70fc\") " pod="openstack/nova-metadata-0" Dec 13 06:52:38 crc kubenswrapper[5048]: I1213 06:52:38.464230 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9502006-ca4a-4a71-90ef-4f86311c70fc-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b9502006-ca4a-4a71-90ef-4f86311c70fc\") " pod="openstack/nova-metadata-0" Dec 13 06:52:38 crc kubenswrapper[5048]: I1213 06:52:38.464364 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9502006-ca4a-4a71-90ef-4f86311c70fc-config-data\") pod \"nova-metadata-0\" (UID: \"b9502006-ca4a-4a71-90ef-4f86311c70fc\") " pod="openstack/nova-metadata-0" Dec 13 06:52:38 crc kubenswrapper[5048]: I1213 06:52:38.470299 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f9f2f\" (UniqueName: \"kubernetes.io/projected/b9502006-ca4a-4a71-90ef-4f86311c70fc-kube-api-access-f9f2f\") pod \"nova-metadata-0\" (UID: \"b9502006-ca4a-4a71-90ef-4f86311c70fc\") " pod="openstack/nova-metadata-0" Dec 13 06:52:38 crc 
Dec 13 06:52:38 crc kubenswrapper[5048]: I1213 06:52:38.579742 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="074ecf6d-bc26-46eb-8cf0-ffcf4612a773" path="/var/lib/kubelet/pods/074ecf6d-bc26-46eb-8cf0-ffcf4612a773/volumes"
Dec 13 06:52:39 crc kubenswrapper[5048]: I1213 06:52:39.036618 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Dec 13 06:52:39 crc kubenswrapper[5048]: I1213 06:52:39.890693 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b9502006-ca4a-4a71-90ef-4f86311c70fc","Type":"ContainerStarted","Data":"1fc4dba3c4f6e1f7e9026d19ca89b436dc5056f947cd8cee613a9d84297e3810"}
Dec 13 06:52:39 crc kubenswrapper[5048]: I1213 06:52:39.890969 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b9502006-ca4a-4a71-90ef-4f86311c70fc","Type":"ContainerStarted","Data":"635a12e57dfce9078427bfc1142bc666791d50ddf288c167aeff74f617cc430b"}
Dec 13 06:52:39 crc kubenswrapper[5048]: I1213 06:52:39.890979 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b9502006-ca4a-4a71-90ef-4f86311c70fc","Type":"ContainerStarted","Data":"14f960320fdbc0fe11f006bfaaca2643afc4d66e42e4b95a25e08c71dafdae48"}
Dec 13 06:52:39 crc kubenswrapper[5048]: I1213 06:52:39.909822 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=1.909800155 podStartE2EDuration="1.909800155s" podCreationTimestamp="2025-12-13 06:52:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:52:39.90624116 +0000 UTC m=+1393.772835751" watchObservedRunningTime="2025-12-13 06:52:39.909800155 +0000 UTC m=+1393.776394736"
Dec 13 06:52:41 crc kubenswrapper[5048]: I1213 06:52:41.229294 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Dec 13 06:52:43 crc kubenswrapper[5048]: I1213 06:52:43.578618 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Dec 13 06:52:43 crc kubenswrapper[5048]: I1213 06:52:43.578941 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Dec 13 06:52:46 crc kubenswrapper[5048]: I1213 06:52:46.215975 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 13 06:52:46 crc kubenswrapper[5048]: I1213 06:52:46.216285 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 13 06:52:46 crc kubenswrapper[5048]: I1213 06:52:46.229234 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Dec 13 06:52:46 crc kubenswrapper[5048]: I1213 06:52:46.257903 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Dec 13 06:52:46 crc kubenswrapper[5048]: I1213 06:52:46.267344 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Dec 13 06:52:46 crc kubenswrapper[5048]: I1213 06:52:46.267569 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Dec 13 06:52:46 crc kubenswrapper[5048]: I1213 06:52:46.980228 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Dec 13 06:52:47 crc kubenswrapper[5048]: I1213 06:52:47.279617 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="40f60c37-7e8e-4a30-9d38-26296975a60c" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.201:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Dec 13 06:52:47 crc kubenswrapper[5048]: I1213 06:52:47.279667 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="40f60c37-7e8e-4a30-9d38-26296975a60c" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.201:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Dec 13 06:52:48 crc kubenswrapper[5048]: I1213 06:52:48.578014 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Dec 13 06:52:48 crc kubenswrapper[5048]: I1213 06:52:48.578062 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Dec 13 06:52:49 crc kubenswrapper[5048]: I1213 06:52:49.698795 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="b9502006-ca4a-4a71-90ef-4f86311c70fc" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.202:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Dec 13 06:52:49 crc kubenswrapper[5048]: I1213 06:52:49.698729 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="b9502006-ca4a-4a71-90ef-4f86311c70fc" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.202:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Dec 13 06:52:55 crc kubenswrapper[5048]: I1213 06:52:55.627652 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0"
Dec 13 06:52:56 crc kubenswrapper[5048]: I1213 06:52:56.276852 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Dec 13 06:52:56 crc kubenswrapper[5048]: I1213 06:52:56.277521 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Dec 13 06:52:56 crc kubenswrapper[5048]: I1213 06:52:56.278958 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Dec 13 06:52:56 crc kubenswrapper[5048]: I1213 06:52:56.300650 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Dec 13 06:52:57 crc kubenswrapper[5048]: I1213 06:52:57.215598 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Dec 13 06:52:57 crc kubenswrapper[5048]: I1213 06:52:57.221799 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Dec 13 06:52:58 crc kubenswrapper[5048]: I1213 06:52:58.583938 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Dec 13 06:52:58 crc kubenswrapper[5048]: I1213 06:52:58.592022 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Dec 13 06:52:59 crc kubenswrapper[5048]: I1213 06:52:59.239880 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Dec 13 06:53:08 crc kubenswrapper[5048]: I1213 06:53:08.888384 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"]
Dec 13 06:53:10 crc kubenswrapper[5048]: I1213 06:53:10.335182 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Dec 13 06:53:13 crc kubenswrapper[5048]: I1213 06:53:13.228082 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="a7bbc535-10f7-44cc-89a6-cbb697149e4a" containerName="rabbitmq" containerID="cri-o://76983382cf92288123a8dd9336351247957426def1673de16e24790c2e0b5eaa" gracePeriod=604796
Dec 13 06:53:14 crc kubenswrapper[5048]: I1213 06:53:14.478917 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="e3a0bce1-8848-4ac7-a030-19640b952708" containerName="rabbitmq" containerID="cri-o://6037c65fef82f78efa1cd645f8f6f532d5d7048d1b16ac91f759541da6dbceee" gracePeriod=604796
Dec 13 06:53:16 crc kubenswrapper[5048]: I1213 06:53:16.216269 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 13 06:53:16 crc kubenswrapper[5048]: I1213 06:53:16.216696 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 13 06:53:16 crc kubenswrapper[5048]: I1213 06:53:16.216756 5048 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-j7hns"
Dec 13 06:53:16 crc kubenswrapper[5048]: I1213 06:53:16.217590 5048 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"73b8468a0b14a8e6512874e20fb2d8442254eac77f67c3fab0272cfdb9a926da"} pod="openshift-machine-config-operator/machine-config-daemon-j7hns" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Dec 13 06:53:16 crc kubenswrapper[5048]: I1213 06:53:16.217661 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" containerID="cri-o://73b8468a0b14a8e6512874e20fb2d8442254eac77f67c3fab0272cfdb9a926da" gracePeriod=600
Dec 13 06:53:16 crc kubenswrapper[5048]: I1213 06:53:16.390060 5048 generic.go:334] "Generic (PLEG): container finished" podID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerID="73b8468a0b14a8e6512874e20fb2d8442254eac77f67c3fab0272cfdb9a926da" exitCode=0
podID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerID="73b8468a0b14a8e6512874e20fb2d8442254eac77f67c3fab0272cfdb9a926da" exitCode=0 Dec 13 06:53:16 crc kubenswrapper[5048]: I1213 06:53:16.390109 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" event={"ID":"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b","Type":"ContainerDied","Data":"73b8468a0b14a8e6512874e20fb2d8442254eac77f67c3fab0272cfdb9a926da"} Dec 13 06:53:16 crc kubenswrapper[5048]: I1213 06:53:16.390147 5048 scope.go:117] "RemoveContainer" containerID="3dc134f93584bad71d64c1b4cfe7bce9b820cc019159e00fde72b17e966595a4" Dec 13 06:53:17 crc kubenswrapper[5048]: I1213 06:53:17.400809 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" event={"ID":"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b","Type":"ContainerStarted","Data":"c3c3c010eb86b49ee1ff9e8da0dddc32f266e163f2ef609d402b38792e3c4862"} Dec 13 06:53:20 crc kubenswrapper[5048]: I1213 06:53:20.544886 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="a7bbc535-10f7-44cc-89a6-cbb697149e4a" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.98:5671: connect: connection refused" Dec 13 06:53:20 crc kubenswrapper[5048]: I1213 06:53:20.602951 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="e3a0bce1-8848-4ac7-a030-19640b952708" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.99:5671: connect: connection refused" Dec 13 06:53:21 crc kubenswrapper[5048]: I1213 06:53:21.439493 5048 generic.go:334] "Generic (PLEG): container finished" podID="a7bbc535-10f7-44cc-89a6-cbb697149e4a" containerID="76983382cf92288123a8dd9336351247957426def1673de16e24790c2e0b5eaa" exitCode=0 Dec 13 06:53:21 crc kubenswrapper[5048]: I1213 06:53:21.439881 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"a7bbc535-10f7-44cc-89a6-cbb697149e4a","Type":"ContainerDied","Data":"76983382cf92288123a8dd9336351247957426def1673de16e24790c2e0b5eaa"} Dec 13 06:53:21 crc kubenswrapper[5048]: I1213 06:53:21.749291 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 13 06:53:21 crc kubenswrapper[5048]: I1213 06:53:21.876766 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/a7bbc535-10f7-44cc-89a6-cbb697149e4a-rabbitmq-confd\") pod \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " Dec 13 06:53:21 crc kubenswrapper[5048]: I1213 06:53:21.876814 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/a7bbc535-10f7-44cc-89a6-cbb697149e4a-erlang-cookie-secret\") pod \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " Dec 13 06:53:21 crc kubenswrapper[5048]: I1213 06:53:21.876840 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/a7bbc535-10f7-44cc-89a6-cbb697149e4a-rabbitmq-plugins\") pod \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " Dec 13 06:53:21 crc kubenswrapper[5048]: I1213 06:53:21.876902 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a7bbc535-10f7-44cc-89a6-cbb697149e4a-config-data\") pod \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " Dec 13 06:53:21 crc kubenswrapper[5048]: I1213 06:53:21.876936 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/a7bbc535-10f7-44cc-89a6-cbb697149e4a-rabbitmq-tls\") pod \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " Dec 13 06:53:21 crc kubenswrapper[5048]: I1213 06:53:21.876991 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fbhlw\" (UniqueName: \"kubernetes.io/projected/a7bbc535-10f7-44cc-89a6-cbb697149e4a-kube-api-access-fbhlw\") pod \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " Dec 13 06:53:21 crc kubenswrapper[5048]: I1213 06:53:21.877071 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/a7bbc535-10f7-44cc-89a6-cbb697149e4a-rabbitmq-erlang-cookie\") pod \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " Dec 13 06:53:21 crc kubenswrapper[5048]: I1213 06:53:21.877123 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/a7bbc535-10f7-44cc-89a6-cbb697149e4a-plugins-conf\") pod \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " Dec 13 06:53:21 crc kubenswrapper[5048]: I1213 06:53:21.877151 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/a7bbc535-10f7-44cc-89a6-cbb697149e4a-pod-info\") pod \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " Dec 13 06:53:21 crc kubenswrapper[5048]: I1213 06:53:21.877269 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\" (UID: 
\"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " Dec 13 06:53:21 crc kubenswrapper[5048]: I1213 06:53:21.877300 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/a7bbc535-10f7-44cc-89a6-cbb697149e4a-server-conf\") pod \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\" (UID: \"a7bbc535-10f7-44cc-89a6-cbb697149e4a\") " Dec 13 06:53:21 crc kubenswrapper[5048]: I1213 06:53:21.877671 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7bbc535-10f7-44cc-89a6-cbb697149e4a-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "a7bbc535-10f7-44cc-89a6-cbb697149e4a" (UID: "a7bbc535-10f7-44cc-89a6-cbb697149e4a"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:53:21 crc kubenswrapper[5048]: I1213 06:53:21.877728 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7bbc535-10f7-44cc-89a6-cbb697149e4a-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "a7bbc535-10f7-44cc-89a6-cbb697149e4a" (UID: "a7bbc535-10f7-44cc-89a6-cbb697149e4a"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:53:21 crc kubenswrapper[5048]: I1213 06:53:21.877783 5048 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/a7bbc535-10f7-44cc-89a6-cbb697149e4a-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Dec 13 06:53:21 crc kubenswrapper[5048]: I1213 06:53:21.877793 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a7bbc535-10f7-44cc-89a6-cbb697149e4a-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "a7bbc535-10f7-44cc-89a6-cbb697149e4a" (UID: "a7bbc535-10f7-44cc-89a6-cbb697149e4a"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:53:21 crc kubenswrapper[5048]: I1213 06:53:21.883260 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7bbc535-10f7-44cc-89a6-cbb697149e4a-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "a7bbc535-10f7-44cc-89a6-cbb697149e4a" (UID: "a7bbc535-10f7-44cc-89a6-cbb697149e4a"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:53:21 crc kubenswrapper[5048]: I1213 06:53:21.883886 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7bbc535-10f7-44cc-89a6-cbb697149e4a-kube-api-access-fbhlw" (OuterVolumeSpecName: "kube-api-access-fbhlw") pod "a7bbc535-10f7-44cc-89a6-cbb697149e4a" (UID: "a7bbc535-10f7-44cc-89a6-cbb697149e4a"). InnerVolumeSpecName "kube-api-access-fbhlw". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:53:21 crc kubenswrapper[5048]: I1213 06:53:21.884685 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7bbc535-10f7-44cc-89a6-cbb697149e4a-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "a7bbc535-10f7-44cc-89a6-cbb697149e4a" (UID: "a7bbc535-10f7-44cc-89a6-cbb697149e4a"). InnerVolumeSpecName "erlang-cookie-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:53:21 crc kubenswrapper[5048]: I1213 06:53:21.890143 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/a7bbc535-10f7-44cc-89a6-cbb697149e4a-pod-info" (OuterVolumeSpecName: "pod-info") pod "a7bbc535-10f7-44cc-89a6-cbb697149e4a" (UID: "a7bbc535-10f7-44cc-89a6-cbb697149e4a"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Dec 13 06:53:21 crc kubenswrapper[5048]: I1213 06:53:21.894687 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "persistence") pod "a7bbc535-10f7-44cc-89a6-cbb697149e4a" (UID: "a7bbc535-10f7-44cc-89a6-cbb697149e4a"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 13 06:53:21 crc kubenswrapper[5048]: I1213 06:53:21.909567 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a7bbc535-10f7-44cc-89a6-cbb697149e4a-config-data" (OuterVolumeSpecName: "config-data") pod "a7bbc535-10f7-44cc-89a6-cbb697149e4a" (UID: "a7bbc535-10f7-44cc-89a6-cbb697149e4a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:53:21 crc kubenswrapper[5048]: I1213 06:53:21.934596 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a7bbc535-10f7-44cc-89a6-cbb697149e4a-server-conf" (OuterVolumeSpecName: "server-conf") pod "a7bbc535-10f7-44cc-89a6-cbb697149e4a" (UID: "a7bbc535-10f7-44cc-89a6-cbb697149e4a"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:53:21 crc kubenswrapper[5048]: I1213 06:53:21.979743 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a7bbc535-10f7-44cc-89a6-cbb697149e4a-config-data\") on node \"crc\" DevicePath \"\"" Dec 13 06:53:21 crc kubenswrapper[5048]: I1213 06:53:21.979788 5048 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/a7bbc535-10f7-44cc-89a6-cbb697149e4a-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Dec 13 06:53:21 crc kubenswrapper[5048]: I1213 06:53:21.979802 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fbhlw\" (UniqueName: \"kubernetes.io/projected/a7bbc535-10f7-44cc-89a6-cbb697149e4a-kube-api-access-fbhlw\") on node \"crc\" DevicePath \"\"" Dec 13 06:53:21 crc kubenswrapper[5048]: I1213 06:53:21.979815 5048 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/a7bbc535-10f7-44cc-89a6-cbb697149e4a-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Dec 13 06:53:21 crc kubenswrapper[5048]: I1213 06:53:21.979826 5048 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/a7bbc535-10f7-44cc-89a6-cbb697149e4a-plugins-conf\") on node \"crc\" DevicePath \"\"" Dec 13 06:53:21 crc kubenswrapper[5048]: I1213 06:53:21.979837 5048 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/a7bbc535-10f7-44cc-89a6-cbb697149e4a-pod-info\") on node \"crc\" DevicePath \"\"" Dec 13 06:53:21 crc kubenswrapper[5048]: I1213 06:53:21.979875 5048 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" 
(UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Dec 13 06:53:21 crc kubenswrapper[5048]: I1213 06:53:21.979887 5048 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/a7bbc535-10f7-44cc-89a6-cbb697149e4a-server-conf\") on node \"crc\" DevicePath \"\"" Dec 13 06:53:21 crc kubenswrapper[5048]: I1213 06:53:21.979899 5048 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/a7bbc535-10f7-44cc-89a6-cbb697149e4a-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.003015 5048 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.006676 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7bbc535-10f7-44cc-89a6-cbb697149e4a-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "a7bbc535-10f7-44cc-89a6-cbb697149e4a" (UID: "a7bbc535-10f7-44cc-89a6-cbb697149e4a"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.081868 5048 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.081899 5048 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/a7bbc535-10f7-44cc-89a6-cbb697149e4a-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.362145 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.471768 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"a7bbc535-10f7-44cc-89a6-cbb697149e4a","Type":"ContainerDied","Data":"eb323ae4c8b80fcb7c3da407b10e37d9ffd08382ef8f5995222bdb508be16a06"} Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.471825 5048 scope.go:117] "RemoveContainer" containerID="76983382cf92288123a8dd9336351247957426def1673de16e24790c2e0b5eaa" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.471985 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.489577 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e3a0bce1-8848-4ac7-a030-19640b952708-server-conf\") pod \"e3a0bce1-8848-4ac7-a030-19640b952708\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.489636 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e3a0bce1-8848-4ac7-a030-19640b952708-rabbitmq-erlang-cookie\") pod \"e3a0bce1-8848-4ac7-a030-19640b952708\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.489684 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e3a0bce1-8848-4ac7-a030-19640b952708-plugins-conf\") pod \"e3a0bce1-8848-4ac7-a030-19640b952708\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.489796 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zdlfb\" (UniqueName: \"kubernetes.io/projected/e3a0bce1-8848-4ac7-a030-19640b952708-kube-api-access-zdlfb\") pod \"e3a0bce1-8848-4ac7-a030-19640b952708\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.489899 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e3a0bce1-8848-4ac7-a030-19640b952708-rabbitmq-tls\") pod \"e3a0bce1-8848-4ac7-a030-19640b952708\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.489937 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"e3a0bce1-8848-4ac7-a030-19640b952708\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.490162 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e3a0bce1-8848-4ac7-a030-19640b952708-rabbitmq-plugins\") pod \"e3a0bce1-8848-4ac7-a030-19640b952708\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.490182 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e3a0bce1-8848-4ac7-a030-19640b952708-rabbitmq-confd\") pod \"e3a0bce1-8848-4ac7-a030-19640b952708\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.490206 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e3a0bce1-8848-4ac7-a030-19640b952708-erlang-cookie-secret\") pod \"e3a0bce1-8848-4ac7-a030-19640b952708\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.490273 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e3a0bce1-8848-4ac7-a030-19640b952708-pod-info\") pod \"e3a0bce1-8848-4ac7-a030-19640b952708\" (UID: 
\"e3a0bce1-8848-4ac7-a030-19640b952708\") " Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.490320 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e3a0bce1-8848-4ac7-a030-19640b952708-config-data\") pod \"e3a0bce1-8848-4ac7-a030-19640b952708\" (UID: \"e3a0bce1-8848-4ac7-a030-19640b952708\") " Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.496149 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e3a0bce1-8848-4ac7-a030-19640b952708-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "e3a0bce1-8848-4ac7-a030-19640b952708" (UID: "e3a0bce1-8848-4ac7-a030-19640b952708"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.502571 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e3a0bce1-8848-4ac7-a030-19640b952708-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "e3a0bce1-8848-4ac7-a030-19640b952708" (UID: "e3a0bce1-8848-4ac7-a030-19640b952708"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.503277 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e3a0bce1-8848-4ac7-a030-19640b952708-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "e3a0bce1-8848-4ac7-a030-19640b952708" (UID: "e3a0bce1-8848-4ac7-a030-19640b952708"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.503501 5048 generic.go:334] "Generic (PLEG): container finished" podID="e3a0bce1-8848-4ac7-a030-19640b952708" containerID="6037c65fef82f78efa1cd645f8f6f532d5d7048d1b16ac91f759541da6dbceee" exitCode=0 Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.503537 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"e3a0bce1-8848-4ac7-a030-19640b952708","Type":"ContainerDied","Data":"6037c65fef82f78efa1cd645f8f6f532d5d7048d1b16ac91f759541da6dbceee"} Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.503567 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"e3a0bce1-8848-4ac7-a030-19640b952708","Type":"ContainerDied","Data":"ff8ddcca404a55d9d44ca4ef95d5b60c4be2b56540a9e152bd26bda198734473"} Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.503637 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.520754 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3a0bce1-8848-4ac7-a030-19640b952708-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "e3a0bce1-8848-4ac7-a030-19640b952708" (UID: "e3a0bce1-8848-4ac7-a030-19640b952708"). InnerVolumeSpecName "rabbitmq-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.522566 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3a0bce1-8848-4ac7-a030-19640b952708-kube-api-access-zdlfb" (OuterVolumeSpecName: "kube-api-access-zdlfb") pod "e3a0bce1-8848-4ac7-a030-19640b952708" (UID: "e3a0bce1-8848-4ac7-a030-19640b952708"). InnerVolumeSpecName "kube-api-access-zdlfb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.536575 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "persistence") pod "e3a0bce1-8848-4ac7-a030-19640b952708" (UID: "e3a0bce1-8848-4ac7-a030-19640b952708"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.536623 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/e3a0bce1-8848-4ac7-a030-19640b952708-pod-info" (OuterVolumeSpecName: "pod-info") pod "e3a0bce1-8848-4ac7-a030-19640b952708" (UID: "e3a0bce1-8848-4ac7-a030-19640b952708"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.565117 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e3a0bce1-8848-4ac7-a030-19640b952708-config-data" (OuterVolumeSpecName: "config-data") pod "e3a0bce1-8848-4ac7-a030-19640b952708" (UID: "e3a0bce1-8848-4ac7-a030-19640b952708"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.565559 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3a0bce1-8848-4ac7-a030-19640b952708-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "e3a0bce1-8848-4ac7-a030-19640b952708" (UID: "e3a0bce1-8848-4ac7-a030-19640b952708"). InnerVolumeSpecName "erlang-cookie-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.581536 5048 scope.go:117] "RemoveContainer" containerID="7a1fa96f3b72ad7f4e5857f6a5eb077b15e2b09603a5ab2399dffe44b8b8c7fd" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.595276 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e3a0bce1-8848-4ac7-a030-19640b952708-config-data\") on node \"crc\" DevicePath \"\"" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.595304 5048 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e3a0bce1-8848-4ac7-a030-19640b952708-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.595315 5048 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e3a0bce1-8848-4ac7-a030-19640b952708-plugins-conf\") on node \"crc\" DevicePath \"\"" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.595325 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zdlfb\" (UniqueName: \"kubernetes.io/projected/e3a0bce1-8848-4ac7-a030-19640b952708-kube-api-access-zdlfb\") on node \"crc\" DevicePath \"\"" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.595334 5048 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e3a0bce1-8848-4ac7-a030-19640b952708-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.595355 5048 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.595364 5048 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e3a0bce1-8848-4ac7-a030-19640b952708-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.595372 5048 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e3a0bce1-8848-4ac7-a030-19640b952708-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.595380 5048 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e3a0bce1-8848-4ac7-a030-19640b952708-pod-info\") on node \"crc\" DevicePath \"\"" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.608359 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e3a0bce1-8848-4ac7-a030-19640b952708-server-conf" (OuterVolumeSpecName: "server-conf") pod "e3a0bce1-8848-4ac7-a030-19640b952708" (UID: "e3a0bce1-8848-4ac7-a030-19640b952708"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.616497 5048 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.640490 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.667818 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.710595 5048 scope.go:117] "RemoveContainer" containerID="6037c65fef82f78efa1cd645f8f6f532d5d7048d1b16ac91f759541da6dbceee" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.719632 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.719708 5048 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e3a0bce1-8848-4ac7-a030-19640b952708-server-conf\") on node \"crc\" DevicePath \"\"" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.719733 5048 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Dec 13 06:53:22 crc kubenswrapper[5048]: E1213 06:53:22.720089 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7bbc535-10f7-44cc-89a6-cbb697149e4a" containerName="rabbitmq" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.720108 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7bbc535-10f7-44cc-89a6-cbb697149e4a" containerName="rabbitmq" Dec 13 06:53:22 crc kubenswrapper[5048]: E1213 06:53:22.720122 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3a0bce1-8848-4ac7-a030-19640b952708" containerName="rabbitmq" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.720129 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3a0bce1-8848-4ac7-a030-19640b952708" containerName="rabbitmq" Dec 13 06:53:22 crc kubenswrapper[5048]: E1213 06:53:22.720161 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3a0bce1-8848-4ac7-a030-19640b952708" containerName="setup-container" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.720170 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3a0bce1-8848-4ac7-a030-19640b952708" containerName="setup-container" Dec 13 06:53:22 crc kubenswrapper[5048]: E1213 06:53:22.720188 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7bbc535-10f7-44cc-89a6-cbb697149e4a" containerName="setup-container" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.720196 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7bbc535-10f7-44cc-89a6-cbb697149e4a" containerName="setup-container" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.720411 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7bbc535-10f7-44cc-89a6-cbb697149e4a" containerName="rabbitmq" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.720429 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3a0bce1-8848-4ac7-a030-19640b952708" containerName="rabbitmq" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.727390 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.730075 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.734644 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.734995 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.735114 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.735509 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.735741 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-qk64f" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.737923 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.741540 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.750199 5048 scope.go:117] "RemoveContainer" containerID="75ee9b498965105594517eef849b5ed0c40d73c2d2599da5a3eae4643255329f" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.789680 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3a0bce1-8848-4ac7-a030-19640b952708-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "e3a0bce1-8848-4ac7-a030-19640b952708" (UID: "e3a0bce1-8848-4ac7-a030-19640b952708"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.791990 5048 scope.go:117] "RemoveContainer" containerID="6037c65fef82f78efa1cd645f8f6f532d5d7048d1b16ac91f759541da6dbceee" Dec 13 06:53:22 crc kubenswrapper[5048]: E1213 06:53:22.792363 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6037c65fef82f78efa1cd645f8f6f532d5d7048d1b16ac91f759541da6dbceee\": container with ID starting with 6037c65fef82f78efa1cd645f8f6f532d5d7048d1b16ac91f759541da6dbceee not found: ID does not exist" containerID="6037c65fef82f78efa1cd645f8f6f532d5d7048d1b16ac91f759541da6dbceee" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.792399 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6037c65fef82f78efa1cd645f8f6f532d5d7048d1b16ac91f759541da6dbceee"} err="failed to get container status \"6037c65fef82f78efa1cd645f8f6f532d5d7048d1b16ac91f759541da6dbceee\": rpc error: code = NotFound desc = could not find container \"6037c65fef82f78efa1cd645f8f6f532d5d7048d1b16ac91f759541da6dbceee\": container with ID starting with 6037c65fef82f78efa1cd645f8f6f532d5d7048d1b16ac91f759541da6dbceee not found: ID does not exist" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.792425 5048 scope.go:117] "RemoveContainer" containerID="75ee9b498965105594517eef849b5ed0c40d73c2d2599da5a3eae4643255329f" Dec 13 06:53:22 crc kubenswrapper[5048]: E1213 06:53:22.792882 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"75ee9b498965105594517eef849b5ed0c40d73c2d2599da5a3eae4643255329f\": container with ID starting with 75ee9b498965105594517eef849b5ed0c40d73c2d2599da5a3eae4643255329f not found: ID does not exist" containerID="75ee9b498965105594517eef849b5ed0c40d73c2d2599da5a3eae4643255329f" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.792929 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"75ee9b498965105594517eef849b5ed0c40d73c2d2599da5a3eae4643255329f"} err="failed to get container status \"75ee9b498965105594517eef849b5ed0c40d73c2d2599da5a3eae4643255329f\": rpc error: code = NotFound desc = could not find container \"75ee9b498965105594517eef849b5ed0c40d73c2d2599da5a3eae4643255329f\": container with ID starting with 75ee9b498965105594517eef849b5ed0c40d73c2d2599da5a3eae4643255329f not found: ID does not exist" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.821556 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"cd2c9077-4969-4d54-a677-2f84128c1a13\") " pod="openstack/rabbitmq-server-0" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.821619 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/cd2c9077-4969-4d54-a677-2f84128c1a13-pod-info\") pod \"rabbitmq-server-0\" (UID: \"cd2c9077-4969-4d54-a677-2f84128c1a13\") " pod="openstack/rabbitmq-server-0" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.821677 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/cd2c9077-4969-4d54-a677-2f84128c1a13-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"cd2c9077-4969-4d54-a677-2f84128c1a13\") " pod="openstack/rabbitmq-server-0" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.821706 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/cd2c9077-4969-4d54-a677-2f84128c1a13-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"cd2c9077-4969-4d54-a677-2f84128c1a13\") " pod="openstack/rabbitmq-server-0" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.821729 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/cd2c9077-4969-4d54-a677-2f84128c1a13-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"cd2c9077-4969-4d54-a677-2f84128c1a13\") " pod="openstack/rabbitmq-server-0" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.821756 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4jc28\" (UniqueName: \"kubernetes.io/projected/cd2c9077-4969-4d54-a677-2f84128c1a13-kube-api-access-4jc28\") pod \"rabbitmq-server-0\" (UID: \"cd2c9077-4969-4d54-a677-2f84128c1a13\") " pod="openstack/rabbitmq-server-0" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.821792 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/cd2c9077-4969-4d54-a677-2f84128c1a13-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"cd2c9077-4969-4d54-a677-2f84128c1a13\") " pod="openstack/rabbitmq-server-0" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.821815 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/cd2c9077-4969-4d54-a677-2f84128c1a13-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"cd2c9077-4969-4d54-a677-2f84128c1a13\") " pod="openstack/rabbitmq-server-0" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.821849 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/cd2c9077-4969-4d54-a677-2f84128c1a13-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"cd2c9077-4969-4d54-a677-2f84128c1a13\") " pod="openstack/rabbitmq-server-0" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.821878 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/cd2c9077-4969-4d54-a677-2f84128c1a13-server-conf\") pod \"rabbitmq-server-0\" (UID: \"cd2c9077-4969-4d54-a677-2f84128c1a13\") " pod="openstack/rabbitmq-server-0" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.821906 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cd2c9077-4969-4d54-a677-2f84128c1a13-config-data\") pod \"rabbitmq-server-0\" (UID: \"cd2c9077-4969-4d54-a677-2f84128c1a13\") " pod="openstack/rabbitmq-server-0" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.821979 5048 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e3a0bce1-8848-4ac7-a030-19640b952708-rabbitmq-confd\") on node \"crc\" 
DevicePath \"\"" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.869236 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.880200 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.906335 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.912412 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.919201 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.919461 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-gnwjj" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.919566 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.919812 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.920408 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.920683 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.921170 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.922745 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.923349 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"cd2c9077-4969-4d54-a677-2f84128c1a13\") " pod="openstack/rabbitmq-server-0" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.923396 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/cd2c9077-4969-4d54-a677-2f84128c1a13-pod-info\") pod \"rabbitmq-server-0\" (UID: \"cd2c9077-4969-4d54-a677-2f84128c1a13\") " pod="openstack/rabbitmq-server-0" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.923460 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/cd2c9077-4969-4d54-a677-2f84128c1a13-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"cd2c9077-4969-4d54-a677-2f84128c1a13\") " pod="openstack/rabbitmq-server-0" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.923480 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/cd2c9077-4969-4d54-a677-2f84128c1a13-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"cd2c9077-4969-4d54-a677-2f84128c1a13\") " pod="openstack/rabbitmq-server-0" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 
Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.923514 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4jc28\" (UniqueName: \"kubernetes.io/projected/cd2c9077-4969-4d54-a677-2f84128c1a13-kube-api-access-4jc28\") pod \"rabbitmq-server-0\" (UID: \"cd2c9077-4969-4d54-a677-2f84128c1a13\") " pod="openstack/rabbitmq-server-0"
Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.923539 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/cd2c9077-4969-4d54-a677-2f84128c1a13-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"cd2c9077-4969-4d54-a677-2f84128c1a13\") " pod="openstack/rabbitmq-server-0"
Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.923559 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/cd2c9077-4969-4d54-a677-2f84128c1a13-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"cd2c9077-4969-4d54-a677-2f84128c1a13\") " pod="openstack/rabbitmq-server-0"
Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.923588 5048 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"cd2c9077-4969-4d54-a677-2f84128c1a13\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/rabbitmq-server-0"
Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.924318 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/cd2c9077-4969-4d54-a677-2f84128c1a13-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"cd2c9077-4969-4d54-a677-2f84128c1a13\") " pod="openstack/rabbitmq-server-0"
Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.924420 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/cd2c9077-4969-4d54-a677-2f84128c1a13-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"cd2c9077-4969-4d54-a677-2f84128c1a13\") " pod="openstack/rabbitmq-server-0"
Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.925150 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/cd2c9077-4969-4d54-a677-2f84128c1a13-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"cd2c9077-4969-4d54-a677-2f84128c1a13\") " pod="openstack/rabbitmq-server-0"
Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.923598 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/cd2c9077-4969-4d54-a677-2f84128c1a13-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"cd2c9077-4969-4d54-a677-2f84128c1a13\") " pod="openstack/rabbitmq-server-0"
Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.926587 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/cd2c9077-4969-4d54-a677-2f84128c1a13-server-conf\") pod \"rabbitmq-server-0\" (UID: \"cd2c9077-4969-4d54-a677-2f84128c1a13\") " pod="openstack/rabbitmq-server-0"
(UID: \"cd2c9077-4969-4d54-a677-2f84128c1a13\") " pod="openstack/rabbitmq-server-0" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.926632 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cd2c9077-4969-4d54-a677-2f84128c1a13-config-data\") pod \"rabbitmq-server-0\" (UID: \"cd2c9077-4969-4d54-a677-2f84128c1a13\") " pod="openstack/rabbitmq-server-0" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.927709 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cd2c9077-4969-4d54-a677-2f84128c1a13-config-data\") pod \"rabbitmq-server-0\" (UID: \"cd2c9077-4969-4d54-a677-2f84128c1a13\") " pod="openstack/rabbitmq-server-0" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.927842 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/cd2c9077-4969-4d54-a677-2f84128c1a13-server-conf\") pod \"rabbitmq-server-0\" (UID: \"cd2c9077-4969-4d54-a677-2f84128c1a13\") " pod="openstack/rabbitmq-server-0" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.931163 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/cd2c9077-4969-4d54-a677-2f84128c1a13-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"cd2c9077-4969-4d54-a677-2f84128c1a13\") " pod="openstack/rabbitmq-server-0" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.948260 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/cd2c9077-4969-4d54-a677-2f84128c1a13-pod-info\") pod \"rabbitmq-server-0\" (UID: \"cd2c9077-4969-4d54-a677-2f84128c1a13\") " pod="openstack/rabbitmq-server-0" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.949968 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/cd2c9077-4969-4d54-a677-2f84128c1a13-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"cd2c9077-4969-4d54-a677-2f84128c1a13\") " pod="openstack/rabbitmq-server-0" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.958235 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/cd2c9077-4969-4d54-a677-2f84128c1a13-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"cd2c9077-4969-4d54-a677-2f84128c1a13\") " pod="openstack/rabbitmq-server-0" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.959004 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4jc28\" (UniqueName: \"kubernetes.io/projected/cd2c9077-4969-4d54-a677-2f84128c1a13-kube-api-access-4jc28\") pod \"rabbitmq-server-0\" (UID: \"cd2c9077-4969-4d54-a677-2f84128c1a13\") " pod="openstack/rabbitmq-server-0" Dec 13 06:53:22 crc kubenswrapper[5048]: I1213 06:53:22.987674 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"cd2c9077-4969-4d54-a677-2f84128c1a13\") " pod="openstack/rabbitmq-server-0" Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.027958 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod 
\"rabbitmq-cell1-server-0\" (UID: \"b6586a43-004c-41f4-9172-b3b385849341\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.028050 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b6586a43-004c-41f4-9172-b3b385849341-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6586a43-004c-41f4-9172-b3b385849341\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.028111 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b6586a43-004c-41f4-9172-b3b385849341-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6586a43-004c-41f4-9172-b3b385849341\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.028135 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b6586a43-004c-41f4-9172-b3b385849341-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6586a43-004c-41f4-9172-b3b385849341\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.028258 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b6586a43-004c-41f4-9172-b3b385849341-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6586a43-004c-41f4-9172-b3b385849341\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.028303 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b6586a43-004c-41f4-9172-b3b385849341-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6586a43-004c-41f4-9172-b3b385849341\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.028635 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6hwl8\" (UniqueName: \"kubernetes.io/projected/b6586a43-004c-41f4-9172-b3b385849341-kube-api-access-6hwl8\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6586a43-004c-41f4-9172-b3b385849341\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.029532 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b6586a43-004c-41f4-9172-b3b385849341-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6586a43-004c-41f4-9172-b3b385849341\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.029571 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b6586a43-004c-41f4-9172-b3b385849341-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6586a43-004c-41f4-9172-b3b385849341\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.029619 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: 
\"kubernetes.io/projected/b6586a43-004c-41f4-9172-b3b385849341-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6586a43-004c-41f4-9172-b3b385849341\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.029644 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b6586a43-004c-41f4-9172-b3b385849341-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6586a43-004c-41f4-9172-b3b385849341\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.131466 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b6586a43-004c-41f4-9172-b3b385849341-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6586a43-004c-41f4-9172-b3b385849341\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.131826 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b6586a43-004c-41f4-9172-b3b385849341-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6586a43-004c-41f4-9172-b3b385849341\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.131881 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6586a43-004c-41f4-9172-b3b385849341\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.131922 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b6586a43-004c-41f4-9172-b3b385849341-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6586a43-004c-41f4-9172-b3b385849341\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.131956 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b6586a43-004c-41f4-9172-b3b385849341-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6586a43-004c-41f4-9172-b3b385849341\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.131976 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b6586a43-004c-41f4-9172-b3b385849341-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6586a43-004c-41f4-9172-b3b385849341\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.132040 5048 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6586a43-004c-41f4-9172-b3b385849341\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.132059 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b6586a43-004c-41f4-9172-b3b385849341-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"b6586a43-004c-41f4-9172-b3b385849341\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.132092 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b6586a43-004c-41f4-9172-b3b385849341-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6586a43-004c-41f4-9172-b3b385849341\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.132163 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6hwl8\" (UniqueName: \"kubernetes.io/projected/b6586a43-004c-41f4-9172-b3b385849341-kube-api-access-6hwl8\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6586a43-004c-41f4-9172-b3b385849341\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.132190 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b6586a43-004c-41f4-9172-b3b385849341-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6586a43-004c-41f4-9172-b3b385849341\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.132220 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b6586a43-004c-41f4-9172-b3b385849341-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6586a43-004c-41f4-9172-b3b385849341\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.132253 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b6586a43-004c-41f4-9172-b3b385849341-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6586a43-004c-41f4-9172-b3b385849341\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.132548 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b6586a43-004c-41f4-9172-b3b385849341-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6586a43-004c-41f4-9172-b3b385849341\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.133033 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b6586a43-004c-41f4-9172-b3b385849341-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6586a43-004c-41f4-9172-b3b385849341\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.133067 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b6586a43-004c-41f4-9172-b3b385849341-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6586a43-004c-41f4-9172-b3b385849341\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.133576 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b6586a43-004c-41f4-9172-b3b385849341-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6586a43-004c-41f4-9172-b3b385849341\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.136631 5048 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b6586a43-004c-41f4-9172-b3b385849341-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6586a43-004c-41f4-9172-b3b385849341\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.136783 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b6586a43-004c-41f4-9172-b3b385849341-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6586a43-004c-41f4-9172-b3b385849341\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.136967 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b6586a43-004c-41f4-9172-b3b385849341-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6586a43-004c-41f4-9172-b3b385849341\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.137644 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b6586a43-004c-41f4-9172-b3b385849341-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6586a43-004c-41f4-9172-b3b385849341\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.162792 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.170314 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6hwl8\" (UniqueName: \"kubernetes.io/projected/b6586a43-004c-41f4-9172-b3b385849341-kube-api-access-6hwl8\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6586a43-004c-41f4-9172-b3b385849341\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.179988 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"b6586a43-004c-41f4-9172-b3b385849341\") " pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.247044 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.698088 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 13 06:53:23 crc kubenswrapper[5048]: I1213 06:53:23.800428 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 13 06:53:24 crc kubenswrapper[5048]: I1213 06:53:24.323207 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-d558885bc-9d6qc"] Dec 13 06:53:24 crc kubenswrapper[5048]: I1213 06:53:24.324938 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-d558885bc-9d6qc" Dec 13 06:53:24 crc kubenswrapper[5048]: I1213 06:53:24.327247 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Dec 13 06:53:24 crc kubenswrapper[5048]: I1213 06:53:24.347297 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-d558885bc-9d6qc"] Dec 13 06:53:24 crc kubenswrapper[5048]: I1213 06:53:24.462800 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rvlgw\" (UniqueName: \"kubernetes.io/projected/45d19232-1715-4345-82a0-c53a64569957-kube-api-access-rvlgw\") pod \"dnsmasq-dns-d558885bc-9d6qc\" (UID: \"45d19232-1715-4345-82a0-c53a64569957\") " pod="openstack/dnsmasq-dns-d558885bc-9d6qc" Dec 13 06:53:24 crc kubenswrapper[5048]: I1213 06:53:24.462870 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/45d19232-1715-4345-82a0-c53a64569957-dns-swift-storage-0\") pod \"dnsmasq-dns-d558885bc-9d6qc\" (UID: \"45d19232-1715-4345-82a0-c53a64569957\") " pod="openstack/dnsmasq-dns-d558885bc-9d6qc" Dec 13 06:53:24 crc kubenswrapper[5048]: I1213 06:53:24.462888 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/45d19232-1715-4345-82a0-c53a64569957-ovsdbserver-sb\") pod \"dnsmasq-dns-d558885bc-9d6qc\" (UID: \"45d19232-1715-4345-82a0-c53a64569957\") " pod="openstack/dnsmasq-dns-d558885bc-9d6qc" Dec 13 06:53:24 crc kubenswrapper[5048]: I1213 06:53:24.462906 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/45d19232-1715-4345-82a0-c53a64569957-ovsdbserver-nb\") pod \"dnsmasq-dns-d558885bc-9d6qc\" (UID: \"45d19232-1715-4345-82a0-c53a64569957\") " pod="openstack/dnsmasq-dns-d558885bc-9d6qc" Dec 13 06:53:24 crc kubenswrapper[5048]: I1213 06:53:24.463117 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/45d19232-1715-4345-82a0-c53a64569957-dns-svc\") pod \"dnsmasq-dns-d558885bc-9d6qc\" (UID: \"45d19232-1715-4345-82a0-c53a64569957\") " pod="openstack/dnsmasq-dns-d558885bc-9d6qc" Dec 13 06:53:24 crc kubenswrapper[5048]: I1213 06:53:24.463269 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45d19232-1715-4345-82a0-c53a64569957-config\") pod \"dnsmasq-dns-d558885bc-9d6qc\" (UID: \"45d19232-1715-4345-82a0-c53a64569957\") " pod="openstack/dnsmasq-dns-d558885bc-9d6qc" Dec 13 06:53:24 crc kubenswrapper[5048]: I1213 06:53:24.463515 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/45d19232-1715-4345-82a0-c53a64569957-openstack-edpm-ipam\") pod \"dnsmasq-dns-d558885bc-9d6qc\" (UID: \"45d19232-1715-4345-82a0-c53a64569957\") " pod="openstack/dnsmasq-dns-d558885bc-9d6qc" Dec 13 06:53:24 crc kubenswrapper[5048]: I1213 06:53:24.530992 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b6586a43-004c-41f4-9172-b3b385849341","Type":"ContainerStarted","Data":"8528b9550bd8befc6b97c6024f367bd7ffcde44f08afce768fe7e8c39f54d5b7"} 
Dec 13 06:53:24 crc kubenswrapper[5048]: I1213 06:53:24.532130 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"cd2c9077-4969-4d54-a677-2f84128c1a13","Type":"ContainerStarted","Data":"9d53e112a1b0f4fe64dcbf73aab35ddaa49e4c474dff674b6147ca60bea8e9cc"} Dec 13 06:53:24 crc kubenswrapper[5048]: I1213 06:53:24.565600 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45d19232-1715-4345-82a0-c53a64569957-config\") pod \"dnsmasq-dns-d558885bc-9d6qc\" (UID: \"45d19232-1715-4345-82a0-c53a64569957\") " pod="openstack/dnsmasq-dns-d558885bc-9d6qc" Dec 13 06:53:24 crc kubenswrapper[5048]: I1213 06:53:24.565733 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/45d19232-1715-4345-82a0-c53a64569957-openstack-edpm-ipam\") pod \"dnsmasq-dns-d558885bc-9d6qc\" (UID: \"45d19232-1715-4345-82a0-c53a64569957\") " pod="openstack/dnsmasq-dns-d558885bc-9d6qc" Dec 13 06:53:24 crc kubenswrapper[5048]: I1213 06:53:24.565807 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rvlgw\" (UniqueName: \"kubernetes.io/projected/45d19232-1715-4345-82a0-c53a64569957-kube-api-access-rvlgw\") pod \"dnsmasq-dns-d558885bc-9d6qc\" (UID: \"45d19232-1715-4345-82a0-c53a64569957\") " pod="openstack/dnsmasq-dns-d558885bc-9d6qc" Dec 13 06:53:24 crc kubenswrapper[5048]: I1213 06:53:24.565870 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/45d19232-1715-4345-82a0-c53a64569957-dns-swift-storage-0\") pod \"dnsmasq-dns-d558885bc-9d6qc\" (UID: \"45d19232-1715-4345-82a0-c53a64569957\") " pod="openstack/dnsmasq-dns-d558885bc-9d6qc" Dec 13 06:53:24 crc kubenswrapper[5048]: I1213 06:53:24.565900 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/45d19232-1715-4345-82a0-c53a64569957-ovsdbserver-sb\") pod \"dnsmasq-dns-d558885bc-9d6qc\" (UID: \"45d19232-1715-4345-82a0-c53a64569957\") " pod="openstack/dnsmasq-dns-d558885bc-9d6qc" Dec 13 06:53:24 crc kubenswrapper[5048]: I1213 06:53:24.565923 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/45d19232-1715-4345-82a0-c53a64569957-ovsdbserver-nb\") pod \"dnsmasq-dns-d558885bc-9d6qc\" (UID: \"45d19232-1715-4345-82a0-c53a64569957\") " pod="openstack/dnsmasq-dns-d558885bc-9d6qc" Dec 13 06:53:24 crc kubenswrapper[5048]: I1213 06:53:24.565968 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/45d19232-1715-4345-82a0-c53a64569957-dns-svc\") pod \"dnsmasq-dns-d558885bc-9d6qc\" (UID: \"45d19232-1715-4345-82a0-c53a64569957\") " pod="openstack/dnsmasq-dns-d558885bc-9d6qc" Dec 13 06:53:24 crc kubenswrapper[5048]: I1213 06:53:24.566586 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45d19232-1715-4345-82a0-c53a64569957-config\") pod \"dnsmasq-dns-d558885bc-9d6qc\" (UID: \"45d19232-1715-4345-82a0-c53a64569957\") " pod="openstack/dnsmasq-dns-d558885bc-9d6qc" Dec 13 06:53:24 crc kubenswrapper[5048]: I1213 06:53:24.568469 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/45d19232-1715-4345-82a0-c53a64569957-dns-svc\") pod \"dnsmasq-dns-d558885bc-9d6qc\" (UID: \"45d19232-1715-4345-82a0-c53a64569957\") " pod="openstack/dnsmasq-dns-d558885bc-9d6qc" Dec 13 06:53:24 crc kubenswrapper[5048]: I1213 06:53:24.568807 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/45d19232-1715-4345-82a0-c53a64569957-ovsdbserver-nb\") pod \"dnsmasq-dns-d558885bc-9d6qc\" (UID: \"45d19232-1715-4345-82a0-c53a64569957\") " pod="openstack/dnsmasq-dns-d558885bc-9d6qc" Dec 13 06:53:24 crc kubenswrapper[5048]: I1213 06:53:24.568884 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/45d19232-1715-4345-82a0-c53a64569957-openstack-edpm-ipam\") pod \"dnsmasq-dns-d558885bc-9d6qc\" (UID: \"45d19232-1715-4345-82a0-c53a64569957\") " pod="openstack/dnsmasq-dns-d558885bc-9d6qc" Dec 13 06:53:24 crc kubenswrapper[5048]: I1213 06:53:24.569336 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/45d19232-1715-4345-82a0-c53a64569957-ovsdbserver-sb\") pod \"dnsmasq-dns-d558885bc-9d6qc\" (UID: \"45d19232-1715-4345-82a0-c53a64569957\") " pod="openstack/dnsmasq-dns-d558885bc-9d6qc" Dec 13 06:53:24 crc kubenswrapper[5048]: I1213 06:53:24.570347 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/45d19232-1715-4345-82a0-c53a64569957-dns-swift-storage-0\") pod \"dnsmasq-dns-d558885bc-9d6qc\" (UID: \"45d19232-1715-4345-82a0-c53a64569957\") " pod="openstack/dnsmasq-dns-d558885bc-9d6qc" Dec 13 06:53:24 crc kubenswrapper[5048]: I1213 06:53:24.580934 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7bbc535-10f7-44cc-89a6-cbb697149e4a" path="/var/lib/kubelet/pods/a7bbc535-10f7-44cc-89a6-cbb697149e4a/volumes" Dec 13 06:53:24 crc kubenswrapper[5048]: I1213 06:53:24.679851 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e3a0bce1-8848-4ac7-a030-19640b952708" path="/var/lib/kubelet/pods/e3a0bce1-8848-4ac7-a030-19640b952708/volumes" Dec 13 06:53:24 crc kubenswrapper[5048]: I1213 06:53:24.686854 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rvlgw\" (UniqueName: \"kubernetes.io/projected/45d19232-1715-4345-82a0-c53a64569957-kube-api-access-rvlgw\") pod \"dnsmasq-dns-d558885bc-9d6qc\" (UID: \"45d19232-1715-4345-82a0-c53a64569957\") " pod="openstack/dnsmasq-dns-d558885bc-9d6qc" Dec 13 06:53:24 crc kubenswrapper[5048]: I1213 06:53:24.942605 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-d558885bc-9d6qc" Dec 13 06:53:25 crc kubenswrapper[5048]: I1213 06:53:25.542253 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b6586a43-004c-41f4-9172-b3b385849341","Type":"ContainerStarted","Data":"ade3ff8a56872eea72f69b33d86f432c0b4f4ec977af84e6f7c4c1f91844f0bf"} Dec 13 06:53:25 crc kubenswrapper[5048]: I1213 06:53:25.546029 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"cd2c9077-4969-4d54-a677-2f84128c1a13","Type":"ContainerStarted","Data":"e9f10099e3482d309e58d5e9a145d9ffed177ea74b038b39fbf1da9b73488295"} Dec 13 06:53:25 crc kubenswrapper[5048]: I1213 06:53:25.569760 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-d558885bc-9d6qc"] Dec 13 06:53:25 crc kubenswrapper[5048]: W1213 06:53:25.575224 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod45d19232_1715_4345_82a0_c53a64569957.slice/crio-5ceb7c79657d470d2592623f23c1519e48c0746954fd9ab4e0507683f58bf5b9 WatchSource:0}: Error finding container 5ceb7c79657d470d2592623f23c1519e48c0746954fd9ab4e0507683f58bf5b9: Status 404 returned error can't find the container with id 5ceb7c79657d470d2592623f23c1519e48c0746954fd9ab4e0507683f58bf5b9 Dec 13 06:53:26 crc kubenswrapper[5048]: I1213 06:53:26.556816 5048 generic.go:334] "Generic (PLEG): container finished" podID="45d19232-1715-4345-82a0-c53a64569957" containerID="8416cf6a5e6d26dfc3d7aa1e44adb5b8802472370b902a3d061d19deeeb5b17f" exitCode=0 Dec 13 06:53:26 crc kubenswrapper[5048]: I1213 06:53:26.559917 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d558885bc-9d6qc" event={"ID":"45d19232-1715-4345-82a0-c53a64569957","Type":"ContainerDied","Data":"8416cf6a5e6d26dfc3d7aa1e44adb5b8802472370b902a3d061d19deeeb5b17f"} Dec 13 06:53:26 crc kubenswrapper[5048]: I1213 06:53:26.559997 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d558885bc-9d6qc" event={"ID":"45d19232-1715-4345-82a0-c53a64569957","Type":"ContainerStarted","Data":"5ceb7c79657d470d2592623f23c1519e48c0746954fd9ab4e0507683f58bf5b9"} Dec 13 06:53:27 crc kubenswrapper[5048]: I1213 06:53:27.570294 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d558885bc-9d6qc" event={"ID":"45d19232-1715-4345-82a0-c53a64569957","Type":"ContainerStarted","Data":"bb48f4983fa71d6e148d1054ef3e84cf731b244045dd4a194cf685a98c5a118d"} Dec 13 06:53:27 crc kubenswrapper[5048]: I1213 06:53:27.571122 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-d558885bc-9d6qc" Dec 13 06:53:27 crc kubenswrapper[5048]: I1213 06:53:27.590731 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-d558885bc-9d6qc" podStartSLOduration=3.590709306 podStartE2EDuration="3.590709306s" podCreationTimestamp="2025-12-13 06:53:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:53:27.586420468 +0000 UTC m=+1441.453015059" watchObservedRunningTime="2025-12-13 06:53:27.590709306 +0000 UTC m=+1441.457303887" Dec 13 06:53:34 crc kubenswrapper[5048]: I1213 06:53:34.946167 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-d558885bc-9d6qc" Dec 13 06:53:34 crc kubenswrapper[5048]: I1213 
06:53:34.997570 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-w9nkl"] Dec 13 06:53:34 crc kubenswrapper[5048]: I1213 06:53:34.997845 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-cd5cbd7b9-w9nkl" podUID="92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048" containerName="dnsmasq-dns" containerID="cri-o://cbec161e4e03aefd8a7b0d98502ed6591fea05ca350976b48fb88a6aaef172f4" gracePeriod=10 Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.143589 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78c64bc9c5-zws2g"] Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.148052 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78c64bc9c5-zws2g" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.178227 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78c64bc9c5-zws2g"] Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.275214 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-htghd\" (UniqueName: \"kubernetes.io/projected/40c89533-f85e-4c8f-9826-f1affe855947-kube-api-access-htghd\") pod \"dnsmasq-dns-78c64bc9c5-zws2g\" (UID: \"40c89533-f85e-4c8f-9826-f1affe855947\") " pod="openstack/dnsmasq-dns-78c64bc9c5-zws2g" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.275301 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/40c89533-f85e-4c8f-9826-f1affe855947-ovsdbserver-sb\") pod \"dnsmasq-dns-78c64bc9c5-zws2g\" (UID: \"40c89533-f85e-4c8f-9826-f1affe855947\") " pod="openstack/dnsmasq-dns-78c64bc9c5-zws2g" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.275335 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/40c89533-f85e-4c8f-9826-f1affe855947-ovsdbserver-nb\") pod \"dnsmasq-dns-78c64bc9c5-zws2g\" (UID: \"40c89533-f85e-4c8f-9826-f1affe855947\") " pod="openstack/dnsmasq-dns-78c64bc9c5-zws2g" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.275365 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40c89533-f85e-4c8f-9826-f1affe855947-config\") pod \"dnsmasq-dns-78c64bc9c5-zws2g\" (UID: \"40c89533-f85e-4c8f-9826-f1affe855947\") " pod="openstack/dnsmasq-dns-78c64bc9c5-zws2g" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.275486 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/40c89533-f85e-4c8f-9826-f1affe855947-dns-swift-storage-0\") pod \"dnsmasq-dns-78c64bc9c5-zws2g\" (UID: \"40c89533-f85e-4c8f-9826-f1affe855947\") " pod="openstack/dnsmasq-dns-78c64bc9c5-zws2g" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.275553 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/40c89533-f85e-4c8f-9826-f1affe855947-openstack-edpm-ipam\") pod \"dnsmasq-dns-78c64bc9c5-zws2g\" (UID: \"40c89533-f85e-4c8f-9826-f1affe855947\") " pod="openstack/dnsmasq-dns-78c64bc9c5-zws2g" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.275642 5048 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/40c89533-f85e-4c8f-9826-f1affe855947-dns-svc\") pod \"dnsmasq-dns-78c64bc9c5-zws2g\" (UID: \"40c89533-f85e-4c8f-9826-f1affe855947\") " pod="openstack/dnsmasq-dns-78c64bc9c5-zws2g" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.377043 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/40c89533-f85e-4c8f-9826-f1affe855947-dns-swift-storage-0\") pod \"dnsmasq-dns-78c64bc9c5-zws2g\" (UID: \"40c89533-f85e-4c8f-9826-f1affe855947\") " pod="openstack/dnsmasq-dns-78c64bc9c5-zws2g" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.377214 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/40c89533-f85e-4c8f-9826-f1affe855947-openstack-edpm-ipam\") pod \"dnsmasq-dns-78c64bc9c5-zws2g\" (UID: \"40c89533-f85e-4c8f-9826-f1affe855947\") " pod="openstack/dnsmasq-dns-78c64bc9c5-zws2g" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.377286 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/40c89533-f85e-4c8f-9826-f1affe855947-dns-svc\") pod \"dnsmasq-dns-78c64bc9c5-zws2g\" (UID: \"40c89533-f85e-4c8f-9826-f1affe855947\") " pod="openstack/dnsmasq-dns-78c64bc9c5-zws2g" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.377328 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-htghd\" (UniqueName: \"kubernetes.io/projected/40c89533-f85e-4c8f-9826-f1affe855947-kube-api-access-htghd\") pod \"dnsmasq-dns-78c64bc9c5-zws2g\" (UID: \"40c89533-f85e-4c8f-9826-f1affe855947\") " pod="openstack/dnsmasq-dns-78c64bc9c5-zws2g" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.377359 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/40c89533-f85e-4c8f-9826-f1affe855947-ovsdbserver-sb\") pod \"dnsmasq-dns-78c64bc9c5-zws2g\" (UID: \"40c89533-f85e-4c8f-9826-f1affe855947\") " pod="openstack/dnsmasq-dns-78c64bc9c5-zws2g" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.377381 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/40c89533-f85e-4c8f-9826-f1affe855947-ovsdbserver-nb\") pod \"dnsmasq-dns-78c64bc9c5-zws2g\" (UID: \"40c89533-f85e-4c8f-9826-f1affe855947\") " pod="openstack/dnsmasq-dns-78c64bc9c5-zws2g" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.377406 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40c89533-f85e-4c8f-9826-f1affe855947-config\") pod \"dnsmasq-dns-78c64bc9c5-zws2g\" (UID: \"40c89533-f85e-4c8f-9826-f1affe855947\") " pod="openstack/dnsmasq-dns-78c64bc9c5-zws2g" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.378028 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/40c89533-f85e-4c8f-9826-f1affe855947-dns-swift-storage-0\") pod \"dnsmasq-dns-78c64bc9c5-zws2g\" (UID: \"40c89533-f85e-4c8f-9826-f1affe855947\") " pod="openstack/dnsmasq-dns-78c64bc9c5-zws2g" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.378415 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"config\" (UniqueName: \"kubernetes.io/configmap/40c89533-f85e-4c8f-9826-f1affe855947-config\") pod \"dnsmasq-dns-78c64bc9c5-zws2g\" (UID: \"40c89533-f85e-4c8f-9826-f1affe855947\") " pod="openstack/dnsmasq-dns-78c64bc9c5-zws2g" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.379016 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/40c89533-f85e-4c8f-9826-f1affe855947-openstack-edpm-ipam\") pod \"dnsmasq-dns-78c64bc9c5-zws2g\" (UID: \"40c89533-f85e-4c8f-9826-f1affe855947\") " pod="openstack/dnsmasq-dns-78c64bc9c5-zws2g" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.379424 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/40c89533-f85e-4c8f-9826-f1affe855947-ovsdbserver-sb\") pod \"dnsmasq-dns-78c64bc9c5-zws2g\" (UID: \"40c89533-f85e-4c8f-9826-f1affe855947\") " pod="openstack/dnsmasq-dns-78c64bc9c5-zws2g" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.379469 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/40c89533-f85e-4c8f-9826-f1affe855947-ovsdbserver-nb\") pod \"dnsmasq-dns-78c64bc9c5-zws2g\" (UID: \"40c89533-f85e-4c8f-9826-f1affe855947\") " pod="openstack/dnsmasq-dns-78c64bc9c5-zws2g" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.379546 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/40c89533-f85e-4c8f-9826-f1affe855947-dns-svc\") pod \"dnsmasq-dns-78c64bc9c5-zws2g\" (UID: \"40c89533-f85e-4c8f-9826-f1affe855947\") " pod="openstack/dnsmasq-dns-78c64bc9c5-zws2g" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.416561 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-htghd\" (UniqueName: \"kubernetes.io/projected/40c89533-f85e-4c8f-9826-f1affe855947-kube-api-access-htghd\") pod \"dnsmasq-dns-78c64bc9c5-zws2g\" (UID: \"40c89533-f85e-4c8f-9826-f1affe855947\") " pod="openstack/dnsmasq-dns-78c64bc9c5-zws2g" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.482276 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78c64bc9c5-zws2g" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.578607 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-w9nkl" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.668568 5048 generic.go:334] "Generic (PLEG): container finished" podID="92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048" containerID="cbec161e4e03aefd8a7b0d98502ed6591fea05ca350976b48fb88a6aaef172f4" exitCode=0 Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.668739 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-w9nkl" event={"ID":"92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048","Type":"ContainerDied","Data":"cbec161e4e03aefd8a7b0d98502ed6591fea05ca350976b48fb88a6aaef172f4"} Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.669677 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-w9nkl" event={"ID":"92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048","Type":"ContainerDied","Data":"db2770aea3907b64757d2c370217c5dd67271f32fcb76a0aaae13a5c0f2b0010"} Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.669730 5048 scope.go:117] "RemoveContainer" containerID="cbec161e4e03aefd8a7b0d98502ed6591fea05ca350976b48fb88a6aaef172f4" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.668788 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-w9nkl" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.684517 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k9wp8\" (UniqueName: \"kubernetes.io/projected/92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048-kube-api-access-k9wp8\") pod \"92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048\" (UID: \"92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048\") " Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.684573 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048-dns-swift-storage-0\") pod \"92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048\" (UID: \"92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048\") " Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.684625 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048-dns-svc\") pod \"92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048\" (UID: \"92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048\") " Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.684666 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048-ovsdbserver-sb\") pod \"92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048\" (UID: \"92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048\") " Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.684721 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048-config\") pod \"92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048\" (UID: \"92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048\") " Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.684846 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048-ovsdbserver-nb\") pod \"92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048\" (UID: \"92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048\") " Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.715550 5048 operation_generator.go:803] UnmountVolume.TearDown 
succeeded for volume "kubernetes.io/projected/92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048-kube-api-access-k9wp8" (OuterVolumeSpecName: "kube-api-access-k9wp8") pod "92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048" (UID: "92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048"). InnerVolumeSpecName "kube-api-access-k9wp8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.752637 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048" (UID: "92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.755540 5048 scope.go:117] "RemoveContainer" containerID="4f94737c057a78f6e311402badcbcbbe036078fa18b70b3cf76d218f96f7f0df" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.769378 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048-config" (OuterVolumeSpecName: "config") pod "92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048" (UID: "92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.773280 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048" (UID: "92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.775699 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048" (UID: "92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.778559 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048" (UID: "92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.787219 5048 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.787268 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k9wp8\" (UniqueName: \"kubernetes.io/projected/92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048-kube-api-access-k9wp8\") on node \"crc\" DevicePath \"\"" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.787286 5048 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.787301 5048 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.787314 5048 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.787349 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.787314 5048 scope.go:117] "RemoveContainer" containerID="cbec161e4e03aefd8a7b0d98502ed6591fea05ca350976b48fb88a6aaef172f4" Dec 13 06:53:35 crc kubenswrapper[5048]: E1213 06:53:35.788680 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cbec161e4e03aefd8a7b0d98502ed6591fea05ca350976b48fb88a6aaef172f4\": container with ID starting with cbec161e4e03aefd8a7b0d98502ed6591fea05ca350976b48fb88a6aaef172f4 not found: ID does not exist" containerID="cbec161e4e03aefd8a7b0d98502ed6591fea05ca350976b48fb88a6aaef172f4" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.788829 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cbec161e4e03aefd8a7b0d98502ed6591fea05ca350976b48fb88a6aaef172f4"} err="failed to get container status \"cbec161e4e03aefd8a7b0d98502ed6591fea05ca350976b48fb88a6aaef172f4\": rpc error: code = NotFound desc = could not find container \"cbec161e4e03aefd8a7b0d98502ed6591fea05ca350976b48fb88a6aaef172f4\": container with ID starting with cbec161e4e03aefd8a7b0d98502ed6591fea05ca350976b48fb88a6aaef172f4 not found: ID does not exist" Dec 13 06:53:35 crc kubenswrapper[5048]: I1213 06:53:35.788943 5048 scope.go:117] "RemoveContainer" containerID="4f94737c057a78f6e311402badcbcbbe036078fa18b70b3cf76d218f96f7f0df" Dec 13 06:53:35 crc kubenswrapper[5048]: E1213 06:53:35.789597 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4f94737c057a78f6e311402badcbcbbe036078fa18b70b3cf76d218f96f7f0df\": container with ID starting with 4f94737c057a78f6e311402badcbcbbe036078fa18b70b3cf76d218f96f7f0df not found: ID does not exist" containerID="4f94737c057a78f6e311402badcbcbbe036078fa18b70b3cf76d218f96f7f0df" Dec 13 06:53:35 
crc kubenswrapper[5048]: I1213 06:53:35.789629 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f94737c057a78f6e311402badcbcbbe036078fa18b70b3cf76d218f96f7f0df"} err="failed to get container status \"4f94737c057a78f6e311402badcbcbbe036078fa18b70b3cf76d218f96f7f0df\": rpc error: code = NotFound desc = could not find container \"4f94737c057a78f6e311402badcbcbbe036078fa18b70b3cf76d218f96f7f0df\": container with ID starting with 4f94737c057a78f6e311402badcbcbbe036078fa18b70b3cf76d218f96f7f0df not found: ID does not exist" Dec 13 06:53:36 crc kubenswrapper[5048]: I1213 06:53:36.005092 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-w9nkl"] Dec 13 06:53:36 crc kubenswrapper[5048]: I1213 06:53:36.016901 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-w9nkl"] Dec 13 06:53:36 crc kubenswrapper[5048]: I1213 06:53:36.042029 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78c64bc9c5-zws2g"] Dec 13 06:53:36 crc kubenswrapper[5048]: I1213 06:53:36.579209 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048" path="/var/lib/kubelet/pods/92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048/volumes" Dec 13 06:53:36 crc kubenswrapper[5048]: I1213 06:53:36.678228 5048 generic.go:334] "Generic (PLEG): container finished" podID="40c89533-f85e-4c8f-9826-f1affe855947" containerID="a444af8523b1ccf81b9496f9a721b67e803597175cb75772df8065f704a9dbfe" exitCode=0 Dec 13 06:53:36 crc kubenswrapper[5048]: I1213 06:53:36.678259 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78c64bc9c5-zws2g" event={"ID":"40c89533-f85e-4c8f-9826-f1affe855947","Type":"ContainerDied","Data":"a444af8523b1ccf81b9496f9a721b67e803597175cb75772df8065f704a9dbfe"} Dec 13 06:53:36 crc kubenswrapper[5048]: I1213 06:53:36.678297 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78c64bc9c5-zws2g" event={"ID":"40c89533-f85e-4c8f-9826-f1affe855947","Type":"ContainerStarted","Data":"2457f59441499f718827a4a1307df48500f7352cd912cdcb9b4c0f3f8d33dc0a"} Dec 13 06:53:37 crc kubenswrapper[5048]: I1213 06:53:37.689866 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78c64bc9c5-zws2g" event={"ID":"40c89533-f85e-4c8f-9826-f1affe855947","Type":"ContainerStarted","Data":"b0df8b78bf6d7a9e2e50be939998d809643692568455063d3ef504511c9a6b97"} Dec 13 06:53:37 crc kubenswrapper[5048]: I1213 06:53:37.690270 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-78c64bc9c5-zws2g" Dec 13 06:53:37 crc kubenswrapper[5048]: I1213 06:53:37.715521 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-78c64bc9c5-zws2g" podStartSLOduration=2.71550335 podStartE2EDuration="2.71550335s" podCreationTimestamp="2025-12-13 06:53:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:53:37.710014568 +0000 UTC m=+1451.576609169" watchObservedRunningTime="2025-12-13 06:53:37.71550335 +0000 UTC m=+1451.582097931" Dec 13 06:53:45 crc kubenswrapper[5048]: I1213 06:53:45.484919 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-78c64bc9c5-zws2g" Dec 13 06:53:45 crc kubenswrapper[5048]: I1213 06:53:45.549445 5048 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openstack/dnsmasq-dns-d558885bc-9d6qc"] Dec 13 06:53:45 crc kubenswrapper[5048]: I1213 06:53:45.549689 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-d558885bc-9d6qc" podUID="45d19232-1715-4345-82a0-c53a64569957" containerName="dnsmasq-dns" containerID="cri-o://bb48f4983fa71d6e148d1054ef3e84cf731b244045dd4a194cf685a98c5a118d" gracePeriod=10 Dec 13 06:53:45 crc kubenswrapper[5048]: I1213 06:53:45.774540 5048 generic.go:334] "Generic (PLEG): container finished" podID="45d19232-1715-4345-82a0-c53a64569957" containerID="bb48f4983fa71d6e148d1054ef3e84cf731b244045dd4a194cf685a98c5a118d" exitCode=0 Dec 13 06:53:45 crc kubenswrapper[5048]: I1213 06:53:45.774659 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d558885bc-9d6qc" event={"ID":"45d19232-1715-4345-82a0-c53a64569957","Type":"ContainerDied","Data":"bb48f4983fa71d6e148d1054ef3e84cf731b244045dd4a194cf685a98c5a118d"} Dec 13 06:53:46 crc kubenswrapper[5048]: I1213 06:53:46.045258 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-d558885bc-9d6qc" Dec 13 06:53:46 crc kubenswrapper[5048]: I1213 06:53:46.077279 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45d19232-1715-4345-82a0-c53a64569957-config\") pod \"45d19232-1715-4345-82a0-c53a64569957\" (UID: \"45d19232-1715-4345-82a0-c53a64569957\") " Dec 13 06:53:46 crc kubenswrapper[5048]: I1213 06:53:46.077421 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/45d19232-1715-4345-82a0-c53a64569957-openstack-edpm-ipam\") pod \"45d19232-1715-4345-82a0-c53a64569957\" (UID: \"45d19232-1715-4345-82a0-c53a64569957\") " Dec 13 06:53:46 crc kubenswrapper[5048]: I1213 06:53:46.077507 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/45d19232-1715-4345-82a0-c53a64569957-ovsdbserver-nb\") pod \"45d19232-1715-4345-82a0-c53a64569957\" (UID: \"45d19232-1715-4345-82a0-c53a64569957\") " Dec 13 06:53:46 crc kubenswrapper[5048]: I1213 06:53:46.077598 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rvlgw\" (UniqueName: \"kubernetes.io/projected/45d19232-1715-4345-82a0-c53a64569957-kube-api-access-rvlgw\") pod \"45d19232-1715-4345-82a0-c53a64569957\" (UID: \"45d19232-1715-4345-82a0-c53a64569957\") " Dec 13 06:53:46 crc kubenswrapper[5048]: I1213 06:53:46.077674 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/45d19232-1715-4345-82a0-c53a64569957-dns-svc\") pod \"45d19232-1715-4345-82a0-c53a64569957\" (UID: \"45d19232-1715-4345-82a0-c53a64569957\") " Dec 13 06:53:46 crc kubenswrapper[5048]: I1213 06:53:46.077737 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/45d19232-1715-4345-82a0-c53a64569957-dns-swift-storage-0\") pod \"45d19232-1715-4345-82a0-c53a64569957\" (UID: \"45d19232-1715-4345-82a0-c53a64569957\") " Dec 13 06:53:46 crc kubenswrapper[5048]: I1213 06:53:46.077817 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/45d19232-1715-4345-82a0-c53a64569957-ovsdbserver-sb\") pod \"45d19232-1715-4345-82a0-c53a64569957\" (UID: \"45d19232-1715-4345-82a0-c53a64569957\") " Dec 13 06:53:46 crc kubenswrapper[5048]: I1213 06:53:46.112860 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45d19232-1715-4345-82a0-c53a64569957-kube-api-access-rvlgw" (OuterVolumeSpecName: "kube-api-access-rvlgw") pod "45d19232-1715-4345-82a0-c53a64569957" (UID: "45d19232-1715-4345-82a0-c53a64569957"). InnerVolumeSpecName "kube-api-access-rvlgw". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:53:46 crc kubenswrapper[5048]: I1213 06:53:46.137891 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45d19232-1715-4345-82a0-c53a64569957-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "45d19232-1715-4345-82a0-c53a64569957" (UID: "45d19232-1715-4345-82a0-c53a64569957"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:53:46 crc kubenswrapper[5048]: I1213 06:53:46.143236 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45d19232-1715-4345-82a0-c53a64569957-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "45d19232-1715-4345-82a0-c53a64569957" (UID: "45d19232-1715-4345-82a0-c53a64569957"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:53:46 crc kubenswrapper[5048]: I1213 06:53:46.154941 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45d19232-1715-4345-82a0-c53a64569957-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "45d19232-1715-4345-82a0-c53a64569957" (UID: "45d19232-1715-4345-82a0-c53a64569957"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:53:46 crc kubenswrapper[5048]: I1213 06:53:46.157981 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45d19232-1715-4345-82a0-c53a64569957-config" (OuterVolumeSpecName: "config") pod "45d19232-1715-4345-82a0-c53a64569957" (UID: "45d19232-1715-4345-82a0-c53a64569957"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:53:46 crc kubenswrapper[5048]: I1213 06:53:46.168805 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45d19232-1715-4345-82a0-c53a64569957-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "45d19232-1715-4345-82a0-c53a64569957" (UID: "45d19232-1715-4345-82a0-c53a64569957"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:53:46 crc kubenswrapper[5048]: I1213 06:53:46.171048 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45d19232-1715-4345-82a0-c53a64569957-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "45d19232-1715-4345-82a0-c53a64569957" (UID: "45d19232-1715-4345-82a0-c53a64569957"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 06:53:46 crc kubenswrapper[5048]: I1213 06:53:46.183957 5048 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/45d19232-1715-4345-82a0-c53a64569957-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 13 06:53:46 crc kubenswrapper[5048]: I1213 06:53:46.183988 5048 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/45d19232-1715-4345-82a0-c53a64569957-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 13 06:53:46 crc kubenswrapper[5048]: I1213 06:53:46.184000 5048 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/45d19232-1715-4345-82a0-c53a64569957-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 13 06:53:46 crc kubenswrapper[5048]: I1213 06:53:46.184011 5048 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45d19232-1715-4345-82a0-c53a64569957-config\") on node \"crc\" DevicePath \"\"" Dec 13 06:53:46 crc kubenswrapper[5048]: I1213 06:53:46.184018 5048 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/45d19232-1715-4345-82a0-c53a64569957-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Dec 13 06:53:46 crc kubenswrapper[5048]: I1213 06:53:46.184026 5048 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/45d19232-1715-4345-82a0-c53a64569957-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 13 06:53:46 crc kubenswrapper[5048]: I1213 06:53:46.184036 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rvlgw\" (UniqueName: \"kubernetes.io/projected/45d19232-1715-4345-82a0-c53a64569957-kube-api-access-rvlgw\") on node \"crc\" DevicePath \"\"" Dec 13 06:53:46 crc kubenswrapper[5048]: I1213 06:53:46.783335 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d558885bc-9d6qc" event={"ID":"45d19232-1715-4345-82a0-c53a64569957","Type":"ContainerDied","Data":"5ceb7c79657d470d2592623f23c1519e48c0746954fd9ab4e0507683f58bf5b9"} Dec 13 06:53:46 crc kubenswrapper[5048]: I1213 06:53:46.783385 5048 scope.go:117] "RemoveContainer" containerID="bb48f4983fa71d6e148d1054ef3e84cf731b244045dd4a194cf685a98c5a118d" Dec 13 06:53:46 crc kubenswrapper[5048]: I1213 06:53:46.783516 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-d558885bc-9d6qc" Dec 13 06:53:46 crc kubenswrapper[5048]: I1213 06:53:46.807099 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-d558885bc-9d6qc"] Dec 13 06:53:46 crc kubenswrapper[5048]: I1213 06:53:46.809790 5048 scope.go:117] "RemoveContainer" containerID="8416cf6a5e6d26dfc3d7aa1e44adb5b8802472370b902a3d061d19deeeb5b17f" Dec 13 06:53:46 crc kubenswrapper[5048]: I1213 06:53:46.817492 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-d558885bc-9d6qc"] Dec 13 06:53:48 crc kubenswrapper[5048]: I1213 06:53:48.577199 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="45d19232-1715-4345-82a0-c53a64569957" path="/var/lib/kubelet/pods/45d19232-1715-4345-82a0-c53a64569957/volumes" Dec 13 06:53:57 crc kubenswrapper[5048]: I1213 06:53:57.903109 5048 generic.go:334] "Generic (PLEG): container finished" podID="b6586a43-004c-41f4-9172-b3b385849341" containerID="ade3ff8a56872eea72f69b33d86f432c0b4f4ec977af84e6f7c4c1f91844f0bf" exitCode=0 Dec 13 06:53:57 crc kubenswrapper[5048]: I1213 06:53:57.903904 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b6586a43-004c-41f4-9172-b3b385849341","Type":"ContainerDied","Data":"ade3ff8a56872eea72f69b33d86f432c0b4f4ec977af84e6f7c4c1f91844f0bf"} Dec 13 06:53:57 crc kubenswrapper[5048]: I1213 06:53:57.910884 5048 generic.go:334] "Generic (PLEG): container finished" podID="cd2c9077-4969-4d54-a677-2f84128c1a13" containerID="e9f10099e3482d309e58d5e9a145d9ffed177ea74b038b39fbf1da9b73488295" exitCode=0 Dec 13 06:53:57 crc kubenswrapper[5048]: I1213 06:53:57.910953 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"cd2c9077-4969-4d54-a677-2f84128c1a13","Type":"ContainerDied","Data":"e9f10099e3482d309e58d5e9a145d9ffed177ea74b038b39fbf1da9b73488295"} Dec 13 06:53:58 crc kubenswrapper[5048]: I1213 06:53:58.019108 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c"] Dec 13 06:53:58 crc kubenswrapper[5048]: E1213 06:53:58.019515 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45d19232-1715-4345-82a0-c53a64569957" containerName="init" Dec 13 06:53:58 crc kubenswrapper[5048]: I1213 06:53:58.019527 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="45d19232-1715-4345-82a0-c53a64569957" containerName="init" Dec 13 06:53:58 crc kubenswrapper[5048]: E1213 06:53:58.019543 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048" containerName="init" Dec 13 06:53:58 crc kubenswrapper[5048]: I1213 06:53:58.019549 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048" containerName="init" Dec 13 06:53:58 crc kubenswrapper[5048]: E1213 06:53:58.019565 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45d19232-1715-4345-82a0-c53a64569957" containerName="dnsmasq-dns" Dec 13 06:53:58 crc kubenswrapper[5048]: I1213 06:53:58.019571 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="45d19232-1715-4345-82a0-c53a64569957" containerName="dnsmasq-dns" Dec 13 06:53:58 crc kubenswrapper[5048]: E1213 06:53:58.019590 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048" containerName="dnsmasq-dns" Dec 13 06:53:58 crc kubenswrapper[5048]: I1213 06:53:58.019597 5048 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048" containerName="dnsmasq-dns" Dec 13 06:53:58 crc kubenswrapper[5048]: I1213 06:53:58.019772 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="45d19232-1715-4345-82a0-c53a64569957" containerName="dnsmasq-dns" Dec 13 06:53:58 crc kubenswrapper[5048]: I1213 06:53:58.019793 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="92e0ee4a-6f6b-4e3c-85c4-2cd3c8555048" containerName="dnsmasq-dns" Dec 13 06:53:58 crc kubenswrapper[5048]: I1213 06:53:58.020583 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c" Dec 13 06:53:58 crc kubenswrapper[5048]: I1213 06:53:58.023793 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 13 06:53:58 crc kubenswrapper[5048]: I1213 06:53:58.024088 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 13 06:53:58 crc kubenswrapper[5048]: I1213 06:53:58.024236 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 13 06:53:58 crc kubenswrapper[5048]: I1213 06:53:58.025834 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hgp7p" Dec 13 06:53:58 crc kubenswrapper[5048]: I1213 06:53:58.040157 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c"] Dec 13 06:53:58 crc kubenswrapper[5048]: I1213 06:53:58.129423 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/29c60e6c-e671-43eb-ad63-2ccf40ef5719-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c\" (UID: \"29c60e6c-e671-43eb-ad63-2ccf40ef5719\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c" Dec 13 06:53:58 crc kubenswrapper[5048]: I1213 06:53:58.129502 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29c60e6c-e671-43eb-ad63-2ccf40ef5719-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c\" (UID: \"29c60e6c-e671-43eb-ad63-2ccf40ef5719\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c" Dec 13 06:53:58 crc kubenswrapper[5048]: I1213 06:53:58.129723 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rb8xc\" (UniqueName: \"kubernetes.io/projected/29c60e6c-e671-43eb-ad63-2ccf40ef5719-kube-api-access-rb8xc\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c\" (UID: \"29c60e6c-e671-43eb-ad63-2ccf40ef5719\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c" Dec 13 06:53:58 crc kubenswrapper[5048]: I1213 06:53:58.129851 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/29c60e6c-e671-43eb-ad63-2ccf40ef5719-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c\" (UID: \"29c60e6c-e671-43eb-ad63-2ccf40ef5719\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c" Dec 13 06:53:58 crc kubenswrapper[5048]: I1213 06:53:58.231773 5048 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/29c60e6c-e671-43eb-ad63-2ccf40ef5719-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c\" (UID: \"29c60e6c-e671-43eb-ad63-2ccf40ef5719\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c" Dec 13 06:53:58 crc kubenswrapper[5048]: I1213 06:53:58.231836 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29c60e6c-e671-43eb-ad63-2ccf40ef5719-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c\" (UID: \"29c60e6c-e671-43eb-ad63-2ccf40ef5719\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c" Dec 13 06:53:58 crc kubenswrapper[5048]: I1213 06:53:58.231908 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rb8xc\" (UniqueName: \"kubernetes.io/projected/29c60e6c-e671-43eb-ad63-2ccf40ef5719-kube-api-access-rb8xc\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c\" (UID: \"29c60e6c-e671-43eb-ad63-2ccf40ef5719\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c" Dec 13 06:53:58 crc kubenswrapper[5048]: I1213 06:53:58.231949 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/29c60e6c-e671-43eb-ad63-2ccf40ef5719-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c\" (UID: \"29c60e6c-e671-43eb-ad63-2ccf40ef5719\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c" Dec 13 06:53:58 crc kubenswrapper[5048]: I1213 06:53:58.236577 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/29c60e6c-e671-43eb-ad63-2ccf40ef5719-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c\" (UID: \"29c60e6c-e671-43eb-ad63-2ccf40ef5719\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c" Dec 13 06:53:58 crc kubenswrapper[5048]: I1213 06:53:58.237607 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/29c60e6c-e671-43eb-ad63-2ccf40ef5719-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c\" (UID: \"29c60e6c-e671-43eb-ad63-2ccf40ef5719\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c" Dec 13 06:53:58 crc kubenswrapper[5048]: I1213 06:53:58.238130 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29c60e6c-e671-43eb-ad63-2ccf40ef5719-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c\" (UID: \"29c60e6c-e671-43eb-ad63-2ccf40ef5719\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c" Dec 13 06:53:58 crc kubenswrapper[5048]: I1213 06:53:58.250509 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rb8xc\" (UniqueName: \"kubernetes.io/projected/29c60e6c-e671-43eb-ad63-2ccf40ef5719-kube-api-access-rb8xc\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c\" (UID: \"29c60e6c-e671-43eb-ad63-2ccf40ef5719\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c" Dec 13 06:53:58 crc kubenswrapper[5048]: I1213 06:53:58.468841 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c" Dec 13 06:53:58 crc kubenswrapper[5048]: I1213 06:53:58.930166 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b6586a43-004c-41f4-9172-b3b385849341","Type":"ContainerStarted","Data":"80e506e91e2f57b3fd9252159b37210e5bdec3212a3fb7f716ebd9e31b51346f"} Dec 13 06:53:58 crc kubenswrapper[5048]: I1213 06:53:58.930797 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:53:58 crc kubenswrapper[5048]: I1213 06:53:58.932888 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"cd2c9077-4969-4d54-a677-2f84128c1a13","Type":"ContainerStarted","Data":"98f57e93539082119bcf442c3648f9f63d9ae7a3dae32619e638e72b56c55f5f"} Dec 13 06:53:58 crc kubenswrapper[5048]: I1213 06:53:58.933686 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Dec 13 06:53:59 crc kubenswrapper[5048]: I1213 06:53:59.006604 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=37.006583397 podStartE2EDuration="37.006583397s" podCreationTimestamp="2025-12-13 06:53:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:53:59.001633081 +0000 UTC m=+1472.868227682" watchObservedRunningTime="2025-12-13 06:53:59.006583397 +0000 UTC m=+1472.873177968" Dec 13 06:53:59 crc kubenswrapper[5048]: I1213 06:53:59.009980 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=37.00996454 podStartE2EDuration="37.00996454s" podCreationTimestamp="2025-12-13 06:53:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 06:53:58.963812969 +0000 UTC m=+1472.830407570" watchObservedRunningTime="2025-12-13 06:53:59.00996454 +0000 UTC m=+1472.876559121" Dec 13 06:53:59 crc kubenswrapper[5048]: I1213 06:53:59.046149 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c"] Dec 13 06:53:59 crc kubenswrapper[5048]: W1213 06:53:59.048330 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod29c60e6c_e671_43eb_ad63_2ccf40ef5719.slice/crio-30666c6f8b89e3a3c24569de823f150d7c29f590b3b98e262b0a9631f30f675d WatchSource:0}: Error finding container 30666c6f8b89e3a3c24569de823f150d7c29f590b3b98e262b0a9631f30f675d: Status 404 returned error can't find the container with id 30666c6f8b89e3a3c24569de823f150d7c29f590b3b98e262b0a9631f30f675d Dec 13 06:53:59 crc kubenswrapper[5048]: I1213 06:53:59.946710 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c" event={"ID":"29c60e6c-e671-43eb-ad63-2ccf40ef5719","Type":"ContainerStarted","Data":"30666c6f8b89e3a3c24569de823f150d7c29f590b3b98e262b0a9631f30f675d"} Dec 13 06:54:11 crc kubenswrapper[5048]: I1213 06:54:11.055374 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c" 
event={"ID":"29c60e6c-e671-43eb-ad63-2ccf40ef5719","Type":"ContainerStarted","Data":"b30dd7592074e07f749c44610a7e440e87ee9373741d5c0913450df22d7ded5b"} Dec 13 06:54:11 crc kubenswrapper[5048]: I1213 06:54:11.073517 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c" podStartSLOduration=3.206832229 podStartE2EDuration="14.073425934s" podCreationTimestamp="2025-12-13 06:53:57 +0000 UTC" firstStartedPulling="2025-12-13 06:53:59.051253447 +0000 UTC m=+1472.917848028" lastFinishedPulling="2025-12-13 06:54:09.917847152 +0000 UTC m=+1483.784441733" observedRunningTime="2025-12-13 06:54:11.073199988 +0000 UTC m=+1484.939794579" watchObservedRunningTime="2025-12-13 06:54:11.073425934 +0000 UTC m=+1484.940020515" Dec 13 06:54:13 crc kubenswrapper[5048]: I1213 06:54:13.166649 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Dec 13 06:54:13 crc kubenswrapper[5048]: I1213 06:54:13.250701 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Dec 13 06:54:23 crc kubenswrapper[5048]: I1213 06:54:23.160947 5048 generic.go:334] "Generic (PLEG): container finished" podID="29c60e6c-e671-43eb-ad63-2ccf40ef5719" containerID="b30dd7592074e07f749c44610a7e440e87ee9373741d5c0913450df22d7ded5b" exitCode=0 Dec 13 06:54:23 crc kubenswrapper[5048]: I1213 06:54:23.161052 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c" event={"ID":"29c60e6c-e671-43eb-ad63-2ccf40ef5719","Type":"ContainerDied","Data":"b30dd7592074e07f749c44610a7e440e87ee9373741d5c0913450df22d7ded5b"} Dec 13 06:54:24 crc kubenswrapper[5048]: I1213 06:54:24.588758 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c" Dec 13 06:54:24 crc kubenswrapper[5048]: I1213 06:54:24.617046 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/29c60e6c-e671-43eb-ad63-2ccf40ef5719-inventory\") pod \"29c60e6c-e671-43eb-ad63-2ccf40ef5719\" (UID: \"29c60e6c-e671-43eb-ad63-2ccf40ef5719\") " Dec 13 06:54:24 crc kubenswrapper[5048]: I1213 06:54:24.629672 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29c60e6c-e671-43eb-ad63-2ccf40ef5719-repo-setup-combined-ca-bundle\") pod \"29c60e6c-e671-43eb-ad63-2ccf40ef5719\" (UID: \"29c60e6c-e671-43eb-ad63-2ccf40ef5719\") " Dec 13 06:54:24 crc kubenswrapper[5048]: I1213 06:54:24.629841 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rb8xc\" (UniqueName: \"kubernetes.io/projected/29c60e6c-e671-43eb-ad63-2ccf40ef5719-kube-api-access-rb8xc\") pod \"29c60e6c-e671-43eb-ad63-2ccf40ef5719\" (UID: \"29c60e6c-e671-43eb-ad63-2ccf40ef5719\") " Dec 13 06:54:24 crc kubenswrapper[5048]: I1213 06:54:24.629938 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/29c60e6c-e671-43eb-ad63-2ccf40ef5719-ssh-key\") pod \"29c60e6c-e671-43eb-ad63-2ccf40ef5719\" (UID: \"29c60e6c-e671-43eb-ad63-2ccf40ef5719\") " Dec 13 06:54:24 crc kubenswrapper[5048]: I1213 06:54:24.634921 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29c60e6c-e671-43eb-ad63-2ccf40ef5719-kube-api-access-rb8xc" (OuterVolumeSpecName: "kube-api-access-rb8xc") pod "29c60e6c-e671-43eb-ad63-2ccf40ef5719" (UID: "29c60e6c-e671-43eb-ad63-2ccf40ef5719"). InnerVolumeSpecName "kube-api-access-rb8xc". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:54:24 crc kubenswrapper[5048]: I1213 06:54:24.636603 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29c60e6c-e671-43eb-ad63-2ccf40ef5719-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "29c60e6c-e671-43eb-ad63-2ccf40ef5719" (UID: "29c60e6c-e671-43eb-ad63-2ccf40ef5719"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:54:24 crc kubenswrapper[5048]: I1213 06:54:24.653660 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29c60e6c-e671-43eb-ad63-2ccf40ef5719-inventory" (OuterVolumeSpecName: "inventory") pod "29c60e6c-e671-43eb-ad63-2ccf40ef5719" (UID: "29c60e6c-e671-43eb-ad63-2ccf40ef5719"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:54:24 crc kubenswrapper[5048]: I1213 06:54:24.660420 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29c60e6c-e671-43eb-ad63-2ccf40ef5719-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "29c60e6c-e671-43eb-ad63-2ccf40ef5719" (UID: "29c60e6c-e671-43eb-ad63-2ccf40ef5719"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:54:24 crc kubenswrapper[5048]: I1213 06:54:24.733466 5048 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/29c60e6c-e671-43eb-ad63-2ccf40ef5719-inventory\") on node \"crc\" DevicePath \"\"" Dec 13 06:54:24 crc kubenswrapper[5048]: I1213 06:54:24.733508 5048 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29c60e6c-e671-43eb-ad63-2ccf40ef5719-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 06:54:24 crc kubenswrapper[5048]: I1213 06:54:24.733527 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rb8xc\" (UniqueName: \"kubernetes.io/projected/29c60e6c-e671-43eb-ad63-2ccf40ef5719-kube-api-access-rb8xc\") on node \"crc\" DevicePath \"\"" Dec 13 06:54:24 crc kubenswrapper[5048]: I1213 06:54:24.733537 5048 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/29c60e6c-e671-43eb-ad63-2ccf40ef5719-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 13 06:54:25 crc kubenswrapper[5048]: I1213 06:54:25.179754 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c" event={"ID":"29c60e6c-e671-43eb-ad63-2ccf40ef5719","Type":"ContainerDied","Data":"30666c6f8b89e3a3c24569de823f150d7c29f590b3b98e262b0a9631f30f675d"} Dec 13 06:54:25 crc kubenswrapper[5048]: I1213 06:54:25.179816 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="30666c6f8b89e3a3c24569de823f150d7c29f590b3b98e262b0a9631f30f675d" Dec 13 06:54:25 crc kubenswrapper[5048]: I1213 06:54:25.179845 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c" Dec 13 06:54:25 crc kubenswrapper[5048]: I1213 06:54:25.270768 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-rrx8r"] Dec 13 06:54:25 crc kubenswrapper[5048]: E1213 06:54:25.271594 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29c60e6c-e671-43eb-ad63-2ccf40ef5719" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Dec 13 06:54:25 crc kubenswrapper[5048]: I1213 06:54:25.271617 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="29c60e6c-e671-43eb-ad63-2ccf40ef5719" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Dec 13 06:54:25 crc kubenswrapper[5048]: I1213 06:54:25.271810 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="29c60e6c-e671-43eb-ad63-2ccf40ef5719" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Dec 13 06:54:25 crc kubenswrapper[5048]: I1213 06:54:25.272528 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rrx8r" Dec 13 06:54:25 crc kubenswrapper[5048]: I1213 06:54:25.274966 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 13 06:54:25 crc kubenswrapper[5048]: I1213 06:54:25.276084 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hgp7p" Dec 13 06:54:25 crc kubenswrapper[5048]: I1213 06:54:25.276853 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 13 06:54:25 crc kubenswrapper[5048]: I1213 06:54:25.289611 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 13 06:54:25 crc kubenswrapper[5048]: I1213 06:54:25.293540 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-rrx8r"] Dec 13 06:54:25 crc kubenswrapper[5048]: I1213 06:54:25.345807 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/db18a520-b418-4ee9-bed9-d72c023c9959-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-rrx8r\" (UID: \"db18a520-b418-4ee9-bed9-d72c023c9959\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rrx8r" Dec 13 06:54:25 crc kubenswrapper[5048]: I1213 06:54:25.345886 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/db18a520-b418-4ee9-bed9-d72c023c9959-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-rrx8r\" (UID: \"db18a520-b418-4ee9-bed9-d72c023c9959\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rrx8r" Dec 13 06:54:25 crc kubenswrapper[5048]: I1213 06:54:25.345960 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-plmc2\" (UniqueName: \"kubernetes.io/projected/db18a520-b418-4ee9-bed9-d72c023c9959-kube-api-access-plmc2\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-rrx8r\" (UID: \"db18a520-b418-4ee9-bed9-d72c023c9959\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rrx8r" Dec 13 06:54:25 crc kubenswrapper[5048]: I1213 06:54:25.447883 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/db18a520-b418-4ee9-bed9-d72c023c9959-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-rrx8r\" (UID: \"db18a520-b418-4ee9-bed9-d72c023c9959\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rrx8r" Dec 13 06:54:25 crc kubenswrapper[5048]: I1213 06:54:25.448014 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-plmc2\" (UniqueName: \"kubernetes.io/projected/db18a520-b418-4ee9-bed9-d72c023c9959-kube-api-access-plmc2\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-rrx8r\" (UID: \"db18a520-b418-4ee9-bed9-d72c023c9959\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rrx8r" Dec 13 06:54:25 crc kubenswrapper[5048]: I1213 06:54:25.448111 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/db18a520-b418-4ee9-bed9-d72c023c9959-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-rrx8r\" (UID: \"db18a520-b418-4ee9-bed9-d72c023c9959\") " 
pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rrx8r" Dec 13 06:54:25 crc kubenswrapper[5048]: I1213 06:54:25.452958 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/db18a520-b418-4ee9-bed9-d72c023c9959-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-rrx8r\" (UID: \"db18a520-b418-4ee9-bed9-d72c023c9959\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rrx8r" Dec 13 06:54:25 crc kubenswrapper[5048]: I1213 06:54:25.452958 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/db18a520-b418-4ee9-bed9-d72c023c9959-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-rrx8r\" (UID: \"db18a520-b418-4ee9-bed9-d72c023c9959\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rrx8r" Dec 13 06:54:25 crc kubenswrapper[5048]: I1213 06:54:25.469242 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-plmc2\" (UniqueName: \"kubernetes.io/projected/db18a520-b418-4ee9-bed9-d72c023c9959-kube-api-access-plmc2\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-rrx8r\" (UID: \"db18a520-b418-4ee9-bed9-d72c023c9959\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rrx8r" Dec 13 06:54:25 crc kubenswrapper[5048]: I1213 06:54:25.598413 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rrx8r" Dec 13 06:54:26 crc kubenswrapper[5048]: I1213 06:54:26.101660 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-rrx8r"] Dec 13 06:54:26 crc kubenswrapper[5048]: I1213 06:54:26.189748 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rrx8r" event={"ID":"db18a520-b418-4ee9-bed9-d72c023c9959","Type":"ContainerStarted","Data":"9766ea8158e806349d1a89fc6e24d87e3081da48a0f668af99400f5e9ddb8466"} Dec 13 06:54:26 crc kubenswrapper[5048]: I1213 06:54:26.628755 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 13 06:54:28 crc kubenswrapper[5048]: I1213 06:54:28.211380 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rrx8r" event={"ID":"db18a520-b418-4ee9-bed9-d72c023c9959","Type":"ContainerStarted","Data":"47e62dc582f3bf300f7a8d8a256be36e00bd23184061d763fa372881d4e7ad88"} Dec 13 06:54:28 crc kubenswrapper[5048]: I1213 06:54:28.235861 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rrx8r" podStartSLOduration=2.717619985 podStartE2EDuration="3.235847412s" podCreationTimestamp="2025-12-13 06:54:25 +0000 UTC" firstStartedPulling="2025-12-13 06:54:26.108018015 +0000 UTC m=+1499.974612586" lastFinishedPulling="2025-12-13 06:54:26.626245432 +0000 UTC m=+1500.492840013" observedRunningTime="2025-12-13 06:54:28.234198988 +0000 UTC m=+1502.100793569" watchObservedRunningTime="2025-12-13 06:54:28.235847412 +0000 UTC m=+1502.102441993" Dec 13 06:54:30 crc kubenswrapper[5048]: I1213 06:54:30.229180 5048 generic.go:334] "Generic (PLEG): container finished" podID="db18a520-b418-4ee9-bed9-d72c023c9959" containerID="47e62dc582f3bf300f7a8d8a256be36e00bd23184061d763fa372881d4e7ad88" exitCode=0 Dec 13 06:54:30 crc kubenswrapper[5048]: I1213 06:54:30.229277 5048 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rrx8r" event={"ID":"db18a520-b418-4ee9-bed9-d72c023c9959","Type":"ContainerDied","Data":"47e62dc582f3bf300f7a8d8a256be36e00bd23184061d763fa372881d4e7ad88"} Dec 13 06:54:31 crc kubenswrapper[5048]: I1213 06:54:31.919884 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rrx8r" Dec 13 06:54:31 crc kubenswrapper[5048]: I1213 06:54:31.930727 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-plmc2\" (UniqueName: \"kubernetes.io/projected/db18a520-b418-4ee9-bed9-d72c023c9959-kube-api-access-plmc2\") pod \"db18a520-b418-4ee9-bed9-d72c023c9959\" (UID: \"db18a520-b418-4ee9-bed9-d72c023c9959\") " Dec 13 06:54:31 crc kubenswrapper[5048]: I1213 06:54:31.931313 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/db18a520-b418-4ee9-bed9-d72c023c9959-inventory\") pod \"db18a520-b418-4ee9-bed9-d72c023c9959\" (UID: \"db18a520-b418-4ee9-bed9-d72c023c9959\") " Dec 13 06:54:31 crc kubenswrapper[5048]: I1213 06:54:31.931907 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/db18a520-b418-4ee9-bed9-d72c023c9959-ssh-key\") pod \"db18a520-b418-4ee9-bed9-d72c023c9959\" (UID: \"db18a520-b418-4ee9-bed9-d72c023c9959\") " Dec 13 06:54:31 crc kubenswrapper[5048]: I1213 06:54:31.950005 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db18a520-b418-4ee9-bed9-d72c023c9959-kube-api-access-plmc2" (OuterVolumeSpecName: "kube-api-access-plmc2") pod "db18a520-b418-4ee9-bed9-d72c023c9959" (UID: "db18a520-b418-4ee9-bed9-d72c023c9959"). InnerVolumeSpecName "kube-api-access-plmc2". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:54:31 crc kubenswrapper[5048]: I1213 06:54:31.967591 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db18a520-b418-4ee9-bed9-d72c023c9959-inventory" (OuterVolumeSpecName: "inventory") pod "db18a520-b418-4ee9-bed9-d72c023c9959" (UID: "db18a520-b418-4ee9-bed9-d72c023c9959"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:54:31 crc kubenswrapper[5048]: I1213 06:54:31.971669 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db18a520-b418-4ee9-bed9-d72c023c9959-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "db18a520-b418-4ee9-bed9-d72c023c9959" (UID: "db18a520-b418-4ee9-bed9-d72c023c9959"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:54:32 crc kubenswrapper[5048]: I1213 06:54:32.033870 5048 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/db18a520-b418-4ee9-bed9-d72c023c9959-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 13 06:54:32 crc kubenswrapper[5048]: I1213 06:54:32.033907 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-plmc2\" (UniqueName: \"kubernetes.io/projected/db18a520-b418-4ee9-bed9-d72c023c9959-kube-api-access-plmc2\") on node \"crc\" DevicePath \"\"" Dec 13 06:54:32 crc kubenswrapper[5048]: I1213 06:54:32.033919 5048 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/db18a520-b418-4ee9-bed9-d72c023c9959-inventory\") on node \"crc\" DevicePath \"\"" Dec 13 06:54:32 crc kubenswrapper[5048]: I1213 06:54:32.252632 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rrx8r" event={"ID":"db18a520-b418-4ee9-bed9-d72c023c9959","Type":"ContainerDied","Data":"9766ea8158e806349d1a89fc6e24d87e3081da48a0f668af99400f5e9ddb8466"} Dec 13 06:54:32 crc kubenswrapper[5048]: I1213 06:54:32.253068 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9766ea8158e806349d1a89fc6e24d87e3081da48a0f668af99400f5e9ddb8466" Dec 13 06:54:32 crc kubenswrapper[5048]: I1213 06:54:32.252682 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rrx8r" Dec 13 06:54:33 crc kubenswrapper[5048]: I1213 06:54:33.216195 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4flcx"] Dec 13 06:54:33 crc kubenswrapper[5048]: E1213 06:54:33.216738 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db18a520-b418-4ee9-bed9-d72c023c9959" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Dec 13 06:54:33 crc kubenswrapper[5048]: I1213 06:54:33.216754 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="db18a520-b418-4ee9-bed9-d72c023c9959" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Dec 13 06:54:33 crc kubenswrapper[5048]: I1213 06:54:33.216922 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="db18a520-b418-4ee9-bed9-d72c023c9959" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Dec 13 06:54:33 crc kubenswrapper[5048]: I1213 06:54:33.217803 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4flcx" Dec 13 06:54:33 crc kubenswrapper[5048]: I1213 06:54:33.219876 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 13 06:54:33 crc kubenswrapper[5048]: I1213 06:54:33.220357 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hgp7p" Dec 13 06:54:33 crc kubenswrapper[5048]: I1213 06:54:33.220710 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 13 06:54:33 crc kubenswrapper[5048]: I1213 06:54:33.220858 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 13 06:54:33 crc kubenswrapper[5048]: I1213 06:54:33.227397 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4flcx"] Dec 13 06:54:33 crc kubenswrapper[5048]: I1213 06:54:33.293361 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/868a604c-3a79-4945-b54e-950797bed05d-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-4flcx\" (UID: \"868a604c-3a79-4945-b54e-950797bed05d\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4flcx" Dec 13 06:54:33 crc kubenswrapper[5048]: I1213 06:54:33.293490 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/868a604c-3a79-4945-b54e-950797bed05d-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-4flcx\" (UID: \"868a604c-3a79-4945-b54e-950797bed05d\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4flcx" Dec 13 06:54:33 crc kubenswrapper[5048]: I1213 06:54:33.293552 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jw8f6\" (UniqueName: \"kubernetes.io/projected/868a604c-3a79-4945-b54e-950797bed05d-kube-api-access-jw8f6\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-4flcx\" (UID: \"868a604c-3a79-4945-b54e-950797bed05d\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4flcx" Dec 13 06:54:33 crc kubenswrapper[5048]: I1213 06:54:33.293672 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/868a604c-3a79-4945-b54e-950797bed05d-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-4flcx\" (UID: \"868a604c-3a79-4945-b54e-950797bed05d\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4flcx" Dec 13 06:54:33 crc kubenswrapper[5048]: I1213 06:54:33.395400 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/868a604c-3a79-4945-b54e-950797bed05d-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-4flcx\" (UID: \"868a604c-3a79-4945-b54e-950797bed05d\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4flcx" Dec 13 06:54:33 crc kubenswrapper[5048]: I1213 06:54:33.395498 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/868a604c-3a79-4945-b54e-950797bed05d-bootstrap-combined-ca-bundle\") pod 
\"bootstrap-edpm-deployment-openstack-edpm-ipam-4flcx\" (UID: \"868a604c-3a79-4945-b54e-950797bed05d\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4flcx" Dec 13 06:54:33 crc kubenswrapper[5048]: I1213 06:54:33.395549 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jw8f6\" (UniqueName: \"kubernetes.io/projected/868a604c-3a79-4945-b54e-950797bed05d-kube-api-access-jw8f6\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-4flcx\" (UID: \"868a604c-3a79-4945-b54e-950797bed05d\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4flcx" Dec 13 06:54:33 crc kubenswrapper[5048]: I1213 06:54:33.395621 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/868a604c-3a79-4945-b54e-950797bed05d-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-4flcx\" (UID: \"868a604c-3a79-4945-b54e-950797bed05d\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4flcx" Dec 13 06:54:33 crc kubenswrapper[5048]: I1213 06:54:33.401675 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/868a604c-3a79-4945-b54e-950797bed05d-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-4flcx\" (UID: \"868a604c-3a79-4945-b54e-950797bed05d\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4flcx" Dec 13 06:54:33 crc kubenswrapper[5048]: I1213 06:54:33.407994 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/868a604c-3a79-4945-b54e-950797bed05d-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-4flcx\" (UID: \"868a604c-3a79-4945-b54e-950797bed05d\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4flcx" Dec 13 06:54:33 crc kubenswrapper[5048]: I1213 06:54:33.414379 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/868a604c-3a79-4945-b54e-950797bed05d-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-4flcx\" (UID: \"868a604c-3a79-4945-b54e-950797bed05d\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4flcx" Dec 13 06:54:33 crc kubenswrapper[5048]: I1213 06:54:33.419150 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jw8f6\" (UniqueName: \"kubernetes.io/projected/868a604c-3a79-4945-b54e-950797bed05d-kube-api-access-jw8f6\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-4flcx\" (UID: \"868a604c-3a79-4945-b54e-950797bed05d\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4flcx" Dec 13 06:54:33 crc kubenswrapper[5048]: I1213 06:54:33.537514 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4flcx" Dec 13 06:54:34 crc kubenswrapper[5048]: I1213 06:54:34.066094 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4flcx"] Dec 13 06:54:34 crc kubenswrapper[5048]: I1213 06:54:34.270707 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4flcx" event={"ID":"868a604c-3a79-4945-b54e-950797bed05d","Type":"ContainerStarted","Data":"18fe2b17558f16900a190a6a30e7616d7d59197f652a996e602ea871d39d422f"} Dec 13 06:54:35 crc kubenswrapper[5048]: I1213 06:54:35.283379 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4flcx" event={"ID":"868a604c-3a79-4945-b54e-950797bed05d","Type":"ContainerStarted","Data":"1dfc350495e2a664ffdb370a50be02270d0319f2f17cfef2abaa789bea3d585c"} Dec 13 06:54:35 crc kubenswrapper[5048]: I1213 06:54:35.440379 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4flcx" podStartSLOduration=1.9126473929999999 podStartE2EDuration="2.440357325s" podCreationTimestamp="2025-12-13 06:54:33 +0000 UTC" firstStartedPulling="2025-12-13 06:54:34.074479956 +0000 UTC m=+1507.941074537" lastFinishedPulling="2025-12-13 06:54:34.602189888 +0000 UTC m=+1508.468784469" observedRunningTime="2025-12-13 06:54:35.425305301 +0000 UTC m=+1509.291899882" watchObservedRunningTime="2025-12-13 06:54:35.440357325 +0000 UTC m=+1509.306951906" Dec 13 06:54:51 crc kubenswrapper[5048]: I1213 06:54:51.728853 5048 scope.go:117] "RemoveContainer" containerID="30ab5c4147986006ad8791488083740ccea5352177bdcb3ae2e820d24177a5a4" Dec 13 06:55:08 crc kubenswrapper[5048]: I1213 06:55:08.759387 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-qfqd9"] Dec 13 06:55:08 crc kubenswrapper[5048]: I1213 06:55:08.761918 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qfqd9" Dec 13 06:55:08 crc kubenswrapper[5048]: I1213 06:55:08.767634 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qfqd9"] Dec 13 06:55:08 crc kubenswrapper[5048]: I1213 06:55:08.887318 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa4c3efc-7e32-4175-8e21-3929f1992fae-catalog-content\") pod \"redhat-marketplace-qfqd9\" (UID: \"fa4c3efc-7e32-4175-8e21-3929f1992fae\") " pod="openshift-marketplace/redhat-marketplace-qfqd9" Dec 13 06:55:08 crc kubenswrapper[5048]: I1213 06:55:08.887394 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa4c3efc-7e32-4175-8e21-3929f1992fae-utilities\") pod \"redhat-marketplace-qfqd9\" (UID: \"fa4c3efc-7e32-4175-8e21-3929f1992fae\") " pod="openshift-marketplace/redhat-marketplace-qfqd9" Dec 13 06:55:08 crc kubenswrapper[5048]: I1213 06:55:08.887452 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cpp2z\" (UniqueName: \"kubernetes.io/projected/fa4c3efc-7e32-4175-8e21-3929f1992fae-kube-api-access-cpp2z\") pod \"redhat-marketplace-qfqd9\" (UID: \"fa4c3efc-7e32-4175-8e21-3929f1992fae\") " pod="openshift-marketplace/redhat-marketplace-qfqd9" Dec 13 06:55:08 crc kubenswrapper[5048]: I1213 06:55:08.994829 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa4c3efc-7e32-4175-8e21-3929f1992fae-catalog-content\") pod \"redhat-marketplace-qfqd9\" (UID: \"fa4c3efc-7e32-4175-8e21-3929f1992fae\") " pod="openshift-marketplace/redhat-marketplace-qfqd9" Dec 13 06:55:08 crc kubenswrapper[5048]: I1213 06:55:08.994889 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa4c3efc-7e32-4175-8e21-3929f1992fae-utilities\") pod \"redhat-marketplace-qfqd9\" (UID: \"fa4c3efc-7e32-4175-8e21-3929f1992fae\") " pod="openshift-marketplace/redhat-marketplace-qfqd9" Dec 13 06:55:08 crc kubenswrapper[5048]: I1213 06:55:08.994942 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cpp2z\" (UniqueName: \"kubernetes.io/projected/fa4c3efc-7e32-4175-8e21-3929f1992fae-kube-api-access-cpp2z\") pod \"redhat-marketplace-qfqd9\" (UID: \"fa4c3efc-7e32-4175-8e21-3929f1992fae\") " pod="openshift-marketplace/redhat-marketplace-qfqd9" Dec 13 06:55:08 crc kubenswrapper[5048]: I1213 06:55:08.995822 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa4c3efc-7e32-4175-8e21-3929f1992fae-catalog-content\") pod \"redhat-marketplace-qfqd9\" (UID: \"fa4c3efc-7e32-4175-8e21-3929f1992fae\") " pod="openshift-marketplace/redhat-marketplace-qfqd9" Dec 13 06:55:08 crc kubenswrapper[5048]: I1213 06:55:08.996168 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa4c3efc-7e32-4175-8e21-3929f1992fae-utilities\") pod \"redhat-marketplace-qfqd9\" (UID: \"fa4c3efc-7e32-4175-8e21-3929f1992fae\") " pod="openshift-marketplace/redhat-marketplace-qfqd9" Dec 13 06:55:09 crc kubenswrapper[5048]: I1213 06:55:09.017206 5048 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-cpp2z\" (UniqueName: \"kubernetes.io/projected/fa4c3efc-7e32-4175-8e21-3929f1992fae-kube-api-access-cpp2z\") pod \"redhat-marketplace-qfqd9\" (UID: \"fa4c3efc-7e32-4175-8e21-3929f1992fae\") " pod="openshift-marketplace/redhat-marketplace-qfqd9" Dec 13 06:55:09 crc kubenswrapper[5048]: I1213 06:55:09.084213 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qfqd9" Dec 13 06:55:09 crc kubenswrapper[5048]: I1213 06:55:09.589245 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qfqd9"] Dec 13 06:55:10 crc kubenswrapper[5048]: I1213 06:55:10.596426 5048 generic.go:334] "Generic (PLEG): container finished" podID="fa4c3efc-7e32-4175-8e21-3929f1992fae" containerID="d489cd812c2b014edbb21a775e2639f571052b8fd1b18112b52c43c14f1dbc64" exitCode=0 Dec 13 06:55:10 crc kubenswrapper[5048]: I1213 06:55:10.596534 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qfqd9" event={"ID":"fa4c3efc-7e32-4175-8e21-3929f1992fae","Type":"ContainerDied","Data":"d489cd812c2b014edbb21a775e2639f571052b8fd1b18112b52c43c14f1dbc64"} Dec 13 06:55:10 crc kubenswrapper[5048]: I1213 06:55:10.596755 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qfqd9" event={"ID":"fa4c3efc-7e32-4175-8e21-3929f1992fae","Type":"ContainerStarted","Data":"9614cca8472ab85043d9aee681ed5ccaa73dac3e81123f13d179fbe104c0f50f"} Dec 13 06:55:11 crc kubenswrapper[5048]: I1213 06:55:11.607869 5048 generic.go:334] "Generic (PLEG): container finished" podID="fa4c3efc-7e32-4175-8e21-3929f1992fae" containerID="b69d3fabd5b57a9e5f2d773f052a1db758f3b06eda0c08db7ce4ebd5ecc6784b" exitCode=0 Dec 13 06:55:11 crc kubenswrapper[5048]: I1213 06:55:11.607943 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qfqd9" event={"ID":"fa4c3efc-7e32-4175-8e21-3929f1992fae","Type":"ContainerDied","Data":"b69d3fabd5b57a9e5f2d773f052a1db758f3b06eda0c08db7ce4ebd5ecc6784b"} Dec 13 06:55:12 crc kubenswrapper[5048]: I1213 06:55:12.620874 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qfqd9" event={"ID":"fa4c3efc-7e32-4175-8e21-3929f1992fae","Type":"ContainerStarted","Data":"1836452fa3d42974628dbe02f1341f7df3a4d171511d8f43b115e56ea653baa0"} Dec 13 06:55:12 crc kubenswrapper[5048]: I1213 06:55:12.644816 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-qfqd9" podStartSLOduration=3.22828215 podStartE2EDuration="4.644800993s" podCreationTimestamp="2025-12-13 06:55:08 +0000 UTC" firstStartedPulling="2025-12-13 06:55:10.600048033 +0000 UTC m=+1544.466642614" lastFinishedPulling="2025-12-13 06:55:12.016566866 +0000 UTC m=+1545.883161457" observedRunningTime="2025-12-13 06:55:12.642917083 +0000 UTC m=+1546.509511664" watchObservedRunningTime="2025-12-13 06:55:12.644800993 +0000 UTC m=+1546.511395574" Dec 13 06:55:16 crc kubenswrapper[5048]: I1213 06:55:16.216294 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 13 06:55:16 crc kubenswrapper[5048]: I1213 06:55:16.216953 5048 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 13 06:55:19 crc kubenswrapper[5048]: I1213 06:55:19.084884 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-qfqd9" Dec 13 06:55:19 crc kubenswrapper[5048]: I1213 06:55:19.085757 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-qfqd9" Dec 13 06:55:19 crc kubenswrapper[5048]: I1213 06:55:19.135271 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-qfqd9" Dec 13 06:55:19 crc kubenswrapper[5048]: I1213 06:55:19.758005 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-qfqd9" Dec 13 06:55:19 crc kubenswrapper[5048]: I1213 06:55:19.813120 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qfqd9"] Dec 13 06:55:21 crc kubenswrapper[5048]: I1213 06:55:21.713648 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-qfqd9" podUID="fa4c3efc-7e32-4175-8e21-3929f1992fae" containerName="registry-server" containerID="cri-o://1836452fa3d42974628dbe02f1341f7df3a4d171511d8f43b115e56ea653baa0" gracePeriod=2 Dec 13 06:55:23 crc kubenswrapper[5048]: I1213 06:55:22.724014 5048 generic.go:334] "Generic (PLEG): container finished" podID="fa4c3efc-7e32-4175-8e21-3929f1992fae" containerID="1836452fa3d42974628dbe02f1341f7df3a4d171511d8f43b115e56ea653baa0" exitCode=0 Dec 13 06:55:23 crc kubenswrapper[5048]: I1213 06:55:22.724083 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qfqd9" event={"ID":"fa4c3efc-7e32-4175-8e21-3929f1992fae","Type":"ContainerDied","Data":"1836452fa3d42974628dbe02f1341f7df3a4d171511d8f43b115e56ea653baa0"} Dec 13 06:55:23 crc kubenswrapper[5048]: I1213 06:55:22.724525 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qfqd9" event={"ID":"fa4c3efc-7e32-4175-8e21-3929f1992fae","Type":"ContainerDied","Data":"9614cca8472ab85043d9aee681ed5ccaa73dac3e81123f13d179fbe104c0f50f"} Dec 13 06:55:23 crc kubenswrapper[5048]: I1213 06:55:22.724551 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9614cca8472ab85043d9aee681ed5ccaa73dac3e81123f13d179fbe104c0f50f" Dec 13 06:55:23 crc kubenswrapper[5048]: I1213 06:55:22.743321 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qfqd9" Dec 13 06:55:23 crc kubenswrapper[5048]: I1213 06:55:22.847518 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa4c3efc-7e32-4175-8e21-3929f1992fae-catalog-content\") pod \"fa4c3efc-7e32-4175-8e21-3929f1992fae\" (UID: \"fa4c3efc-7e32-4175-8e21-3929f1992fae\") " Dec 13 06:55:23 crc kubenswrapper[5048]: I1213 06:55:22.847591 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cpp2z\" (UniqueName: \"kubernetes.io/projected/fa4c3efc-7e32-4175-8e21-3929f1992fae-kube-api-access-cpp2z\") pod \"fa4c3efc-7e32-4175-8e21-3929f1992fae\" (UID: \"fa4c3efc-7e32-4175-8e21-3929f1992fae\") " Dec 13 06:55:23 crc kubenswrapper[5048]: I1213 06:55:22.848001 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa4c3efc-7e32-4175-8e21-3929f1992fae-utilities\") pod \"fa4c3efc-7e32-4175-8e21-3929f1992fae\" (UID: \"fa4c3efc-7e32-4175-8e21-3929f1992fae\") " Dec 13 06:55:23 crc kubenswrapper[5048]: I1213 06:55:22.849535 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fa4c3efc-7e32-4175-8e21-3929f1992fae-utilities" (OuterVolumeSpecName: "utilities") pod "fa4c3efc-7e32-4175-8e21-3929f1992fae" (UID: "fa4c3efc-7e32-4175-8e21-3929f1992fae"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:55:23 crc kubenswrapper[5048]: I1213 06:55:22.864926 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa4c3efc-7e32-4175-8e21-3929f1992fae-kube-api-access-cpp2z" (OuterVolumeSpecName: "kube-api-access-cpp2z") pod "fa4c3efc-7e32-4175-8e21-3929f1992fae" (UID: "fa4c3efc-7e32-4175-8e21-3929f1992fae"). InnerVolumeSpecName "kube-api-access-cpp2z". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:55:23 crc kubenswrapper[5048]: I1213 06:55:22.870911 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fa4c3efc-7e32-4175-8e21-3929f1992fae-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fa4c3efc-7e32-4175-8e21-3929f1992fae" (UID: "fa4c3efc-7e32-4175-8e21-3929f1992fae"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:55:23 crc kubenswrapper[5048]: I1213 06:55:22.950578 5048 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa4c3efc-7e32-4175-8e21-3929f1992fae-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 13 06:55:23 crc kubenswrapper[5048]: I1213 06:55:22.950617 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cpp2z\" (UniqueName: \"kubernetes.io/projected/fa4c3efc-7e32-4175-8e21-3929f1992fae-kube-api-access-cpp2z\") on node \"crc\" DevicePath \"\"" Dec 13 06:55:23 crc kubenswrapper[5048]: I1213 06:55:22.950630 5048 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa4c3efc-7e32-4175-8e21-3929f1992fae-utilities\") on node \"crc\" DevicePath \"\"" Dec 13 06:55:23 crc kubenswrapper[5048]: I1213 06:55:23.731776 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qfqd9"
Dec 13 06:55:23 crc kubenswrapper[5048]: I1213 06:55:23.768167 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qfqd9"]
Dec 13 06:55:23 crc kubenswrapper[5048]: I1213 06:55:23.775589 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-qfqd9"]
Dec 13 06:55:24 crc kubenswrapper[5048]: I1213 06:55:24.576773 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fa4c3efc-7e32-4175-8e21-3929f1992fae" path="/var/lib/kubelet/pods/fa4c3efc-7e32-4175-8e21-3929f1992fae/volumes"
Dec 13 06:55:30 crc kubenswrapper[5048]: I1213 06:55:30.577007 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-tqbqg"]
Dec 13 06:55:30 crc kubenswrapper[5048]: E1213 06:55:30.577963 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa4c3efc-7e32-4175-8e21-3929f1992fae" containerName="extract-utilities"
Dec 13 06:55:30 crc kubenswrapper[5048]: I1213 06:55:30.577980 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa4c3efc-7e32-4175-8e21-3929f1992fae" containerName="extract-utilities"
Dec 13 06:55:30 crc kubenswrapper[5048]: E1213 06:55:30.577996 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa4c3efc-7e32-4175-8e21-3929f1992fae" containerName="registry-server"
Dec 13 06:55:30 crc kubenswrapper[5048]: I1213 06:55:30.578004 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa4c3efc-7e32-4175-8e21-3929f1992fae" containerName="registry-server"
Dec 13 06:55:30 crc kubenswrapper[5048]: E1213 06:55:30.578028 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa4c3efc-7e32-4175-8e21-3929f1992fae" containerName="extract-content"
Dec 13 06:55:30 crc kubenswrapper[5048]: I1213 06:55:30.578035 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa4c3efc-7e32-4175-8e21-3929f1992fae" containerName="extract-content"
Dec 13 06:55:30 crc kubenswrapper[5048]: I1213 06:55:30.578259 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa4c3efc-7e32-4175-8e21-3929f1992fae" containerName="registry-server"
Dec 13 06:55:30 crc kubenswrapper[5048]: I1213 06:55:30.579890 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tqbqg"
Dec 13 06:55:30 crc kubenswrapper[5048]: I1213 06:55:30.594389 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tqbqg"]
Dec 13 06:55:30 crc kubenswrapper[5048]: I1213 06:55:30.699320 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf609db6-39fd-4a76-8558-610658f4a325-utilities\") pod \"community-operators-tqbqg\" (UID: \"bf609db6-39fd-4a76-8558-610658f4a325\") " pod="openshift-marketplace/community-operators-tqbqg"
Dec 13 06:55:30 crc kubenswrapper[5048]: I1213 06:55:30.699403 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bkbm6\" (UniqueName: \"kubernetes.io/projected/bf609db6-39fd-4a76-8558-610658f4a325-kube-api-access-bkbm6\") pod \"community-operators-tqbqg\" (UID: \"bf609db6-39fd-4a76-8558-610658f4a325\") " pod="openshift-marketplace/community-operators-tqbqg"
Dec 13 06:55:30 crc kubenswrapper[5048]: I1213 06:55:30.699544 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf609db6-39fd-4a76-8558-610658f4a325-catalog-content\") pod \"community-operators-tqbqg\" (UID: \"bf609db6-39fd-4a76-8558-610658f4a325\") " pod="openshift-marketplace/community-operators-tqbqg"
Dec 13 06:55:30 crc kubenswrapper[5048]: I1213 06:55:30.801058 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf609db6-39fd-4a76-8558-610658f4a325-catalog-content\") pod \"community-operators-tqbqg\" (UID: \"bf609db6-39fd-4a76-8558-610658f4a325\") " pod="openshift-marketplace/community-operators-tqbqg"
Dec 13 06:55:30 crc kubenswrapper[5048]: I1213 06:55:30.801153 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf609db6-39fd-4a76-8558-610658f4a325-utilities\") pod \"community-operators-tqbqg\" (UID: \"bf609db6-39fd-4a76-8558-610658f4a325\") " pod="openshift-marketplace/community-operators-tqbqg"
Dec 13 06:55:30 crc kubenswrapper[5048]: I1213 06:55:30.801192 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bkbm6\" (UniqueName: \"kubernetes.io/projected/bf609db6-39fd-4a76-8558-610658f4a325-kube-api-access-bkbm6\") pod \"community-operators-tqbqg\" (UID: \"bf609db6-39fd-4a76-8558-610658f4a325\") " pod="openshift-marketplace/community-operators-tqbqg"
Dec 13 06:55:30 crc kubenswrapper[5048]: I1213 06:55:30.801520 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf609db6-39fd-4a76-8558-610658f4a325-catalog-content\") pod \"community-operators-tqbqg\" (UID: \"bf609db6-39fd-4a76-8558-610658f4a325\") " pod="openshift-marketplace/community-operators-tqbqg"
Dec 13 06:55:30 crc kubenswrapper[5048]: I1213 06:55:30.801761 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf609db6-39fd-4a76-8558-610658f4a325-utilities\") pod \"community-operators-tqbqg\" (UID: \"bf609db6-39fd-4a76-8558-610658f4a325\") " pod="openshift-marketplace/community-operators-tqbqg"
Dec 13 06:55:30 crc kubenswrapper[5048]: I1213 06:55:30.820997 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bkbm6\" (UniqueName: \"kubernetes.io/projected/bf609db6-39fd-4a76-8558-610658f4a325-kube-api-access-bkbm6\") pod \"community-operators-tqbqg\" (UID: \"bf609db6-39fd-4a76-8558-610658f4a325\") " pod="openshift-marketplace/community-operators-tqbqg"
Dec 13 06:55:30 crc kubenswrapper[5048]: I1213 06:55:30.902524 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tqbqg"
Dec 13 06:55:31 crc kubenswrapper[5048]: I1213 06:55:31.516803 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tqbqg"]
Dec 13 06:55:31 crc kubenswrapper[5048]: I1213 06:55:31.797908 5048 generic.go:334] "Generic (PLEG): container finished" podID="bf609db6-39fd-4a76-8558-610658f4a325" containerID="fd0b7585c806fab9d2c37b672907f3eb8410528a09174a74dda47097bad0ab5b" exitCode=0
Dec 13 06:55:31 crc kubenswrapper[5048]: I1213 06:55:31.797956 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tqbqg" event={"ID":"bf609db6-39fd-4a76-8558-610658f4a325","Type":"ContainerDied","Data":"fd0b7585c806fab9d2c37b672907f3eb8410528a09174a74dda47097bad0ab5b"}
Dec 13 06:55:31 crc kubenswrapper[5048]: I1213 06:55:31.798228 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tqbqg" event={"ID":"bf609db6-39fd-4a76-8558-610658f4a325","Type":"ContainerStarted","Data":"f55356fafffeba37e50770a711dfc908dcaeea24fa460a97ca53d2665c5cfc95"}
Dec 13 06:55:32 crc kubenswrapper[5048]: I1213 06:55:32.808368 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tqbqg" event={"ID":"bf609db6-39fd-4a76-8558-610658f4a325","Type":"ContainerStarted","Data":"46b6137916d6e81b777502b89325da7ac7a09126dd9fab772fb2dace0895a979"}
Dec 13 06:55:33 crc kubenswrapper[5048]: I1213 06:55:33.835287 5048 generic.go:334] "Generic (PLEG): container finished" podID="bf609db6-39fd-4a76-8558-610658f4a325" containerID="46b6137916d6e81b777502b89325da7ac7a09126dd9fab772fb2dace0895a979" exitCode=0
Dec 13 06:55:33 crc kubenswrapper[5048]: I1213 06:55:33.835417 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tqbqg" event={"ID":"bf609db6-39fd-4a76-8558-610658f4a325","Type":"ContainerDied","Data":"46b6137916d6e81b777502b89325da7ac7a09126dd9fab772fb2dace0895a979"}
Dec 13 06:55:34 crc kubenswrapper[5048]: I1213 06:55:34.848394 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tqbqg" event={"ID":"bf609db6-39fd-4a76-8558-610658f4a325","Type":"ContainerStarted","Data":"0daf824b49186eaa61d293a187348870bc7bdf013c85ad236ff22fa1d9f585c0"}
Dec 13 06:55:34 crc kubenswrapper[5048]: I1213 06:55:34.869510 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-tqbqg" podStartSLOduration=2.135443565 podStartE2EDuration="4.869427987s" podCreationTimestamp="2025-12-13 06:55:30 +0000 UTC" firstStartedPulling="2025-12-13 06:55:31.799400061 +0000 UTC m=+1565.665994652" lastFinishedPulling="2025-12-13 06:55:34.533384493 +0000 UTC m=+1568.399979074" observedRunningTime="2025-12-13 06:55:34.867212348 +0000 UTC m=+1568.733806949" watchObservedRunningTime="2025-12-13 06:55:34.869427987 +0000 UTC m=+1568.736022568"
Dec 13 06:55:40 crc kubenswrapper[5048]: I1213 06:55:40.903251 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-tqbqg"
status="" pod="openshift-marketplace/community-operators-tqbqg" Dec 13 06:55:40 crc kubenswrapper[5048]: I1213 06:55:40.903986 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-tqbqg" Dec 13 06:55:40 crc kubenswrapper[5048]: I1213 06:55:40.958809 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-tqbqg" Dec 13 06:55:41 crc kubenswrapper[5048]: I1213 06:55:41.950418 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-tqbqg" Dec 13 06:55:42 crc kubenswrapper[5048]: I1213 06:55:42.021028 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tqbqg"] Dec 13 06:55:43 crc kubenswrapper[5048]: I1213 06:55:43.923502 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-tqbqg" podUID="bf609db6-39fd-4a76-8558-610658f4a325" containerName="registry-server" containerID="cri-o://0daf824b49186eaa61d293a187348870bc7bdf013c85ad236ff22fa1d9f585c0" gracePeriod=2 Dec 13 06:55:44 crc kubenswrapper[5048]: I1213 06:55:44.373198 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tqbqg" Dec 13 06:55:44 crc kubenswrapper[5048]: I1213 06:55:44.465181 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf609db6-39fd-4a76-8558-610658f4a325-catalog-content\") pod \"bf609db6-39fd-4a76-8558-610658f4a325\" (UID: \"bf609db6-39fd-4a76-8558-610658f4a325\") " Dec 13 06:55:44 crc kubenswrapper[5048]: I1213 06:55:44.465304 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf609db6-39fd-4a76-8558-610658f4a325-utilities\") pod \"bf609db6-39fd-4a76-8558-610658f4a325\" (UID: \"bf609db6-39fd-4a76-8558-610658f4a325\") " Dec 13 06:55:44 crc kubenswrapper[5048]: I1213 06:55:44.465365 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bkbm6\" (UniqueName: \"kubernetes.io/projected/bf609db6-39fd-4a76-8558-610658f4a325-kube-api-access-bkbm6\") pod \"bf609db6-39fd-4a76-8558-610658f4a325\" (UID: \"bf609db6-39fd-4a76-8558-610658f4a325\") " Dec 13 06:55:44 crc kubenswrapper[5048]: I1213 06:55:44.466079 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bf609db6-39fd-4a76-8558-610658f4a325-utilities" (OuterVolumeSpecName: "utilities") pod "bf609db6-39fd-4a76-8558-610658f4a325" (UID: "bf609db6-39fd-4a76-8558-610658f4a325"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:55:44 crc kubenswrapper[5048]: I1213 06:55:44.470798 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf609db6-39fd-4a76-8558-610658f4a325-kube-api-access-bkbm6" (OuterVolumeSpecName: "kube-api-access-bkbm6") pod "bf609db6-39fd-4a76-8558-610658f4a325" (UID: "bf609db6-39fd-4a76-8558-610658f4a325"). InnerVolumeSpecName "kube-api-access-bkbm6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:55:44 crc kubenswrapper[5048]: I1213 06:55:44.520319 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bf609db6-39fd-4a76-8558-610658f4a325-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bf609db6-39fd-4a76-8558-610658f4a325" (UID: "bf609db6-39fd-4a76-8558-610658f4a325"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:55:44 crc kubenswrapper[5048]: I1213 06:55:44.567775 5048 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf609db6-39fd-4a76-8558-610658f4a325-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 13 06:55:44 crc kubenswrapper[5048]: I1213 06:55:44.567804 5048 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf609db6-39fd-4a76-8558-610658f4a325-utilities\") on node \"crc\" DevicePath \"\"" Dec 13 06:55:44 crc kubenswrapper[5048]: I1213 06:55:44.567816 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bkbm6\" (UniqueName: \"kubernetes.io/projected/bf609db6-39fd-4a76-8558-610658f4a325-kube-api-access-bkbm6\") on node \"crc\" DevicePath \"\"" Dec 13 06:55:44 crc kubenswrapper[5048]: I1213 06:55:44.935108 5048 generic.go:334] "Generic (PLEG): container finished" podID="bf609db6-39fd-4a76-8558-610658f4a325" containerID="0daf824b49186eaa61d293a187348870bc7bdf013c85ad236ff22fa1d9f585c0" exitCode=0 Dec 13 06:55:44 crc kubenswrapper[5048]: I1213 06:55:44.935156 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tqbqg" event={"ID":"bf609db6-39fd-4a76-8558-610658f4a325","Type":"ContainerDied","Data":"0daf824b49186eaa61d293a187348870bc7bdf013c85ad236ff22fa1d9f585c0"} Dec 13 06:55:44 crc kubenswrapper[5048]: I1213 06:55:44.935178 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-tqbqg" Dec 13 06:55:44 crc kubenswrapper[5048]: I1213 06:55:44.935201 5048 scope.go:117] "RemoveContainer" containerID="0daf824b49186eaa61d293a187348870bc7bdf013c85ad236ff22fa1d9f585c0" Dec 13 06:55:44 crc kubenswrapper[5048]: I1213 06:55:44.935187 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tqbqg" event={"ID":"bf609db6-39fd-4a76-8558-610658f4a325","Type":"ContainerDied","Data":"f55356fafffeba37e50770a711dfc908dcaeea24fa460a97ca53d2665c5cfc95"} Dec 13 06:55:44 crc kubenswrapper[5048]: I1213 06:55:44.961375 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tqbqg"] Dec 13 06:55:44 crc kubenswrapper[5048]: I1213 06:55:44.965320 5048 scope.go:117] "RemoveContainer" containerID="46b6137916d6e81b777502b89325da7ac7a09126dd9fab772fb2dace0895a979" Dec 13 06:55:44 crc kubenswrapper[5048]: I1213 06:55:44.970268 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-tqbqg"] Dec 13 06:55:44 crc kubenswrapper[5048]: I1213 06:55:44.989766 5048 scope.go:117] "RemoveContainer" containerID="fd0b7585c806fab9d2c37b672907f3eb8410528a09174a74dda47097bad0ab5b" Dec 13 06:55:45 crc kubenswrapper[5048]: I1213 06:55:45.025337 5048 scope.go:117] "RemoveContainer" containerID="0daf824b49186eaa61d293a187348870bc7bdf013c85ad236ff22fa1d9f585c0" Dec 13 06:55:45 crc kubenswrapper[5048]: E1213 06:55:45.025733 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0daf824b49186eaa61d293a187348870bc7bdf013c85ad236ff22fa1d9f585c0\": container with ID starting with 0daf824b49186eaa61d293a187348870bc7bdf013c85ad236ff22fa1d9f585c0 not found: ID does not exist" containerID="0daf824b49186eaa61d293a187348870bc7bdf013c85ad236ff22fa1d9f585c0" Dec 13 06:55:45 crc kubenswrapper[5048]: I1213 06:55:45.025837 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0daf824b49186eaa61d293a187348870bc7bdf013c85ad236ff22fa1d9f585c0"} err="failed to get container status \"0daf824b49186eaa61d293a187348870bc7bdf013c85ad236ff22fa1d9f585c0\": rpc error: code = NotFound desc = could not find container \"0daf824b49186eaa61d293a187348870bc7bdf013c85ad236ff22fa1d9f585c0\": container with ID starting with 0daf824b49186eaa61d293a187348870bc7bdf013c85ad236ff22fa1d9f585c0 not found: ID does not exist" Dec 13 06:55:45 crc kubenswrapper[5048]: I1213 06:55:45.025932 5048 scope.go:117] "RemoveContainer" containerID="46b6137916d6e81b777502b89325da7ac7a09126dd9fab772fb2dace0895a979" Dec 13 06:55:45 crc kubenswrapper[5048]: E1213 06:55:45.026260 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"46b6137916d6e81b777502b89325da7ac7a09126dd9fab772fb2dace0895a979\": container with ID starting with 46b6137916d6e81b777502b89325da7ac7a09126dd9fab772fb2dace0895a979 not found: ID does not exist" containerID="46b6137916d6e81b777502b89325da7ac7a09126dd9fab772fb2dace0895a979" Dec 13 06:55:45 crc kubenswrapper[5048]: I1213 06:55:45.026344 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"46b6137916d6e81b777502b89325da7ac7a09126dd9fab772fb2dace0895a979"} err="failed to get container status \"46b6137916d6e81b777502b89325da7ac7a09126dd9fab772fb2dace0895a979\": rpc error: code = NotFound desc = could not find 
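The RemoveContainer / DeleteContainer exchanges above show a benign teardown race: by the time the kubelet asks the runtime for the status of a container it is deleting, CRI-O has already removed it and answers with gRPC code NotFound, which the kubelet logs but can treat as "already gone". A hedged sketch of classifying such an error with the standard gRPC status package (alreadyRemoved is an illustrative helper, not kubelet code):

    package main

    import (
    	"errors"
    	"fmt"

    	"google.golang.org/grpc/codes"
    	"google.golang.org/grpc/status"
    )

    // alreadyRemoved reports whether a CRI RPC error merely means the
    // container no longer exists, so deletion can be treated as complete.
    func alreadyRemoved(err error) bool {
    	if s, ok := status.FromError(err); ok {
    		return s.Code() == codes.NotFound
    	}
    	return false
    }

    func main() {
    	// Same shape as the runtime's reply in the entries above.
    	gone := status.Error(codes.NotFound, `could not find container "0daf82..."`)
    	fmt.Println(alreadyRemoved(gone))                          // true
    	fmt.Println(alreadyRemoved(errors.New("transport error"))) // false
    }

The distinction matters because only NotFound is safe to swallow; any other code would leave the container's resources unaccounted for.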
container \"46b6137916d6e81b777502b89325da7ac7a09126dd9fab772fb2dace0895a979\": container with ID starting with 46b6137916d6e81b777502b89325da7ac7a09126dd9fab772fb2dace0895a979 not found: ID does not exist" Dec 13 06:55:45 crc kubenswrapper[5048]: I1213 06:55:45.026408 5048 scope.go:117] "RemoveContainer" containerID="fd0b7585c806fab9d2c37b672907f3eb8410528a09174a74dda47097bad0ab5b" Dec 13 06:55:45 crc kubenswrapper[5048]: E1213 06:55:45.026666 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fd0b7585c806fab9d2c37b672907f3eb8410528a09174a74dda47097bad0ab5b\": container with ID starting with fd0b7585c806fab9d2c37b672907f3eb8410528a09174a74dda47097bad0ab5b not found: ID does not exist" containerID="fd0b7585c806fab9d2c37b672907f3eb8410528a09174a74dda47097bad0ab5b" Dec 13 06:55:45 crc kubenswrapper[5048]: I1213 06:55:45.026737 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd0b7585c806fab9d2c37b672907f3eb8410528a09174a74dda47097bad0ab5b"} err="failed to get container status \"fd0b7585c806fab9d2c37b672907f3eb8410528a09174a74dda47097bad0ab5b\": rpc error: code = NotFound desc = could not find container \"fd0b7585c806fab9d2c37b672907f3eb8410528a09174a74dda47097bad0ab5b\": container with ID starting with fd0b7585c806fab9d2c37b672907f3eb8410528a09174a74dda47097bad0ab5b not found: ID does not exist" Dec 13 06:55:46 crc kubenswrapper[5048]: I1213 06:55:46.216168 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 13 06:55:46 crc kubenswrapper[5048]: I1213 06:55:46.216245 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 13 06:55:46 crc kubenswrapper[5048]: I1213 06:55:46.592556 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf609db6-39fd-4a76-8558-610658f4a325" path="/var/lib/kubelet/pods/bf609db6-39fd-4a76-8558-610658f4a325/volumes" Dec 13 06:55:51 crc kubenswrapper[5048]: I1213 06:55:51.796967 5048 scope.go:117] "RemoveContainer" containerID="05a2958c3df88430c0619626d9cc5a043efcc9342e734c40f954d0c3e70041e6" Dec 13 06:56:16 crc kubenswrapper[5048]: I1213 06:56:16.215907 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 13 06:56:16 crc kubenswrapper[5048]: I1213 06:56:16.216693 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 13 06:56:16 crc kubenswrapper[5048]: I1213 06:56:16.216764 5048 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" 
pod="openshift-machine-config-operator/machine-config-daemon-j7hns" Dec 13 06:56:16 crc kubenswrapper[5048]: I1213 06:56:16.217735 5048 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c3c3c010eb86b49ee1ff9e8da0dddc32f266e163f2ef609d402b38792e3c4862"} pod="openshift-machine-config-operator/machine-config-daemon-j7hns" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 13 06:56:16 crc kubenswrapper[5048]: I1213 06:56:16.217838 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" containerID="cri-o://c3c3c010eb86b49ee1ff9e8da0dddc32f266e163f2ef609d402b38792e3c4862" gracePeriod=600 Dec 13 06:56:16 crc kubenswrapper[5048]: E1213 06:56:16.477584 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 06:56:17 crc kubenswrapper[5048]: I1213 06:56:17.267690 5048 generic.go:334] "Generic (PLEG): container finished" podID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerID="c3c3c010eb86b49ee1ff9e8da0dddc32f266e163f2ef609d402b38792e3c4862" exitCode=0 Dec 13 06:56:17 crc kubenswrapper[5048]: I1213 06:56:17.267742 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" event={"ID":"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b","Type":"ContainerDied","Data":"c3c3c010eb86b49ee1ff9e8da0dddc32f266e163f2ef609d402b38792e3c4862"} Dec 13 06:56:17 crc kubenswrapper[5048]: I1213 06:56:17.267782 5048 scope.go:117] "RemoveContainer" containerID="73b8468a0b14a8e6512874e20fb2d8442254eac77f67c3fab0272cfdb9a926da" Dec 13 06:56:17 crc kubenswrapper[5048]: I1213 06:56:17.268638 5048 scope.go:117] "RemoveContainer" containerID="c3c3c010eb86b49ee1ff9e8da0dddc32f266e163f2ef609d402b38792e3c4862" Dec 13 06:56:17 crc kubenswrapper[5048]: E1213 06:56:17.269082 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 06:56:29 crc kubenswrapper[5048]: I1213 06:56:29.566840 5048 scope.go:117] "RemoveContainer" containerID="c3c3c010eb86b49ee1ff9e8da0dddc32f266e163f2ef609d402b38792e3c4862" Dec 13 06:56:29 crc kubenswrapper[5048]: E1213 06:56:29.567718 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 06:56:41 crc kubenswrapper[5048]: I1213 
06:56:41.566829 5048 scope.go:117] "RemoveContainer" containerID="c3c3c010eb86b49ee1ff9e8da0dddc32f266e163f2ef609d402b38792e3c4862" Dec 13 06:56:41 crc kubenswrapper[5048]: E1213 06:56:41.567789 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 06:56:54 crc kubenswrapper[5048]: I1213 06:56:54.566801 5048 scope.go:117] "RemoveContainer" containerID="c3c3c010eb86b49ee1ff9e8da0dddc32f266e163f2ef609d402b38792e3c4862" Dec 13 06:56:54 crc kubenswrapper[5048]: E1213 06:56:54.567724 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 06:57:09 crc kubenswrapper[5048]: I1213 06:57:09.567065 5048 scope.go:117] "RemoveContainer" containerID="c3c3c010eb86b49ee1ff9e8da0dddc32f266e163f2ef609d402b38792e3c4862" Dec 13 06:57:09 crc kubenswrapper[5048]: E1213 06:57:09.567862 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 06:57:22 crc kubenswrapper[5048]: I1213 06:57:22.566895 5048 scope.go:117] "RemoveContainer" containerID="c3c3c010eb86b49ee1ff9e8da0dddc32f266e163f2ef609d402b38792e3c4862" Dec 13 06:57:22 crc kubenswrapper[5048]: E1213 06:57:22.568555 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 06:57:35 crc kubenswrapper[5048]: I1213 06:57:35.567412 5048 scope.go:117] "RemoveContainer" containerID="c3c3c010eb86b49ee1ff9e8da0dddc32f266e163f2ef609d402b38792e3c4862" Dec 13 06:57:35 crc kubenswrapper[5048]: E1213 06:57:35.568237 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 06:57:47 crc kubenswrapper[5048]: I1213 06:57:47.567255 5048 scope.go:117] "RemoveContainer" containerID="c3c3c010eb86b49ee1ff9e8da0dddc32f266e163f2ef609d402b38792e3c4862" Dec 13 06:57:47 crc kubenswrapper[5048]: E1213 06:57:47.567956 
5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 06:58:02 crc kubenswrapper[5048]: I1213 06:58:02.567510 5048 scope.go:117] "RemoveContainer" containerID="c3c3c010eb86b49ee1ff9e8da0dddc32f266e163f2ef609d402b38792e3c4862" Dec 13 06:58:02 crc kubenswrapper[5048]: E1213 06:58:02.568167 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 06:58:05 crc kubenswrapper[5048]: I1213 06:58:05.320833 5048 generic.go:334] "Generic (PLEG): container finished" podID="868a604c-3a79-4945-b54e-950797bed05d" containerID="1dfc350495e2a664ffdb370a50be02270d0319f2f17cfef2abaa789bea3d585c" exitCode=0 Dec 13 06:58:05 crc kubenswrapper[5048]: I1213 06:58:05.320936 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4flcx" event={"ID":"868a604c-3a79-4945-b54e-950797bed05d","Type":"ContainerDied","Data":"1dfc350495e2a664ffdb370a50be02270d0319f2f17cfef2abaa789bea3d585c"} Dec 13 06:58:06 crc kubenswrapper[5048]: I1213 06:58:06.725944 5048 util.go:48] "No ready sandbox for pod can be found. 
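The cadence above (a RemoveContainer attempt roughly every 12-15 seconds, each refused with the same CrashLoopBackOff message) is the container restart backoff at work; the quoted "back-off 5m0s" is its cap. A sketch of the doubling schedule, assuming the commonly cited kubelet defaults of a 10s initial delay doubling to a 5-minute maximum (only the 5m0s cap is confirmed by this log):

    package main

    import (
    	"fmt"
    	"time"
    )

    // restartDelay sketches a doubling crash-loop backoff. The 10s initial
    // delay is an assumption from common kubelet defaults; the 5m cap is
    // the "back-off 5m0s" quoted verbatim in the messages above.
    func restartDelay(restarts int) time.Duration {
    	const (
    		initial  = 10 * time.Second
    		maxDelay = 5 * time.Minute
    	)
    	d := initial
    	for i := 0; i < restarts; i++ {
    		d *= 2
    		if d >= maxDelay {
    			return maxDelay
    		}
    	}
    	return d
    }

    func main() {
    	for n := 0; n <= 6; n++ {
    		fmt.Printf("restart %d -> wait %v\n", n, restartDelay(n))
    	}
    	// 10s, 20s, 40s, 1m20s, 2m40s, then pinned at 5m0s
    }

Once the cap is reached, every sync attempt during the window fails fast with the same error, which is exactly the repetition visible in this stretch of the log.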
Dec 13 06:58:06 crc kubenswrapper[5048]: I1213 06:58:06.870528 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jw8f6\" (UniqueName: \"kubernetes.io/projected/868a604c-3a79-4945-b54e-950797bed05d-kube-api-access-jw8f6\") pod \"868a604c-3a79-4945-b54e-950797bed05d\" (UID: \"868a604c-3a79-4945-b54e-950797bed05d\") "
Dec 13 06:58:06 crc kubenswrapper[5048]: I1213 06:58:06.870602 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/868a604c-3a79-4945-b54e-950797bed05d-bootstrap-combined-ca-bundle\") pod \"868a604c-3a79-4945-b54e-950797bed05d\" (UID: \"868a604c-3a79-4945-b54e-950797bed05d\") "
Dec 13 06:58:06 crc kubenswrapper[5048]: I1213 06:58:06.870637 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/868a604c-3a79-4945-b54e-950797bed05d-inventory\") pod \"868a604c-3a79-4945-b54e-950797bed05d\" (UID: \"868a604c-3a79-4945-b54e-950797bed05d\") "
Dec 13 06:58:06 crc kubenswrapper[5048]: I1213 06:58:06.870807 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/868a604c-3a79-4945-b54e-950797bed05d-ssh-key\") pod \"868a604c-3a79-4945-b54e-950797bed05d\" (UID: \"868a604c-3a79-4945-b54e-950797bed05d\") "
Dec 13 06:58:06 crc kubenswrapper[5048]: I1213 06:58:06.876610 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/868a604c-3a79-4945-b54e-950797bed05d-kube-api-access-jw8f6" (OuterVolumeSpecName: "kube-api-access-jw8f6") pod "868a604c-3a79-4945-b54e-950797bed05d" (UID: "868a604c-3a79-4945-b54e-950797bed05d"). InnerVolumeSpecName "kube-api-access-jw8f6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 06:58:06 crc kubenswrapper[5048]: I1213 06:58:06.877423 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/868a604c-3a79-4945-b54e-950797bed05d-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "868a604c-3a79-4945-b54e-950797bed05d" (UID: "868a604c-3a79-4945-b54e-950797bed05d"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:58:06 crc kubenswrapper[5048]: E1213 06:58:06.900193 5048 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/868a604c-3a79-4945-b54e-950797bed05d-ssh-key podName:868a604c-3a79-4945-b54e-950797bed05d nodeName:}" failed. No retries permitted until 2025-12-13 06:58:07.400161324 +0000 UTC m=+1721.266755925 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "ssh-key" (UniqueName: "kubernetes.io/secret/868a604c-3a79-4945-b54e-950797bed05d-ssh-key") pod "868a604c-3a79-4945-b54e-950797bed05d" (UID: "868a604c-3a79-4945-b54e-950797bed05d") : error deleting /var/lib/kubelet/pods/868a604c-3a79-4945-b54e-950797bed05d/volume-subpaths: remove /var/lib/kubelet/pods/868a604c-3a79-4945-b54e-950797bed05d/volume-subpaths: no such file or directory
Dec 13 06:58:06 crc kubenswrapper[5048]: I1213 06:58:06.904025 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/868a604c-3a79-4945-b54e-950797bed05d-inventory" (OuterVolumeSpecName: "inventory") pod "868a604c-3a79-4945-b54e-950797bed05d" (UID: "868a604c-3a79-4945-b54e-950797bed05d"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:58:06 crc kubenswrapper[5048]: I1213 06:58:06.973355 5048 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/868a604c-3a79-4945-b54e-950797bed05d-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 13 06:58:06 crc kubenswrapper[5048]: I1213 06:58:06.973385 5048 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/868a604c-3a79-4945-b54e-950797bed05d-inventory\") on node \"crc\" DevicePath \"\""
Dec 13 06:58:06 crc kubenswrapper[5048]: I1213 06:58:06.973394 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jw8f6\" (UniqueName: \"kubernetes.io/projected/868a604c-3a79-4945-b54e-950797bed05d-kube-api-access-jw8f6\") on node \"crc\" DevicePath \"\""
Dec 13 06:58:07 crc kubenswrapper[5048]: I1213 06:58:07.340176 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4flcx" event={"ID":"868a604c-3a79-4945-b54e-950797bed05d","Type":"ContainerDied","Data":"18fe2b17558f16900a190a6a30e7616d7d59197f652a996e602ea871d39d422f"}
Dec 13 06:58:07 crc kubenswrapper[5048]: I1213 06:58:07.340215 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="18fe2b17558f16900a190a6a30e7616d7d59197f652a996e602ea871d39d422f"
Dec 13 06:58:07 crc kubenswrapper[5048]: I1213 06:58:07.340220 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-4flcx"
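The failed subPath cleanup above is an ENOENT: the pod's volume-subpaths directory was never created, removing it fails, the operation is requeued (durationBeforeRetry 500ms), and the ssh-key unmount then succeeds on the next pass at 06:58:07.491043 below. A sketch of the tolerant form of such cleanup, where a missing path is treated as already done because the desired end state (directory gone) holds; this is illustrative only, not the kubelet's actual code path:

    package main

    import (
    	"fmt"
    	"os"
    )

    // cleanupSubpaths removes a pod's volume-subpaths directory. Unlike a
    // bare remove (which fails with "no such file or directory", as in the
    // log above), os.RemoveAll returns nil when the path does not exist.
    func cleanupSubpaths(dir string) error {
    	if err := os.RemoveAll(dir); err != nil {
    		return fmt.Errorf("error deleting %s: %w", dir, err)
    	}
    	return nil
    }

    func main() {
    	// Same path shape as the failed operation in the log above.
    	dir := "/var/lib/kubelet/pods/868a604c-3a79-4945-b54e-950797bed05d/volume-subpaths"
    	fmt.Println(cleanupSubpaths(dir)) // <nil> even if the directory never existed
    }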
Dec 13 06:58:07 crc kubenswrapper[5048]: I1213 06:58:07.435802 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-j46dh"]
Dec 13 06:58:07 crc kubenswrapper[5048]: E1213 06:58:07.436226 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf609db6-39fd-4a76-8558-610658f4a325" containerName="registry-server"
Dec 13 06:58:07 crc kubenswrapper[5048]: I1213 06:58:07.436246 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf609db6-39fd-4a76-8558-610658f4a325" containerName="registry-server"
Dec 13 06:58:07 crc kubenswrapper[5048]: E1213 06:58:07.436267 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf609db6-39fd-4a76-8558-610658f4a325" containerName="extract-content"
Dec 13 06:58:07 crc kubenswrapper[5048]: I1213 06:58:07.436275 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf609db6-39fd-4a76-8558-610658f4a325" containerName="extract-content"
Dec 13 06:58:07 crc kubenswrapper[5048]: E1213 06:58:07.436286 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="868a604c-3a79-4945-b54e-950797bed05d" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam"
Dec 13 06:58:07 crc kubenswrapper[5048]: I1213 06:58:07.436294 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="868a604c-3a79-4945-b54e-950797bed05d" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam"
Dec 13 06:58:07 crc kubenswrapper[5048]: E1213 06:58:07.436307 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf609db6-39fd-4a76-8558-610658f4a325" containerName="extract-utilities"
Dec 13 06:58:07 crc kubenswrapper[5048]: I1213 06:58:07.436312 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf609db6-39fd-4a76-8558-610658f4a325" containerName="extract-utilities"
Dec 13 06:58:07 crc kubenswrapper[5048]: I1213 06:58:07.436500 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf609db6-39fd-4a76-8558-610658f4a325" containerName="registry-server"
Dec 13 06:58:07 crc kubenswrapper[5048]: I1213 06:58:07.436511 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="868a604c-3a79-4945-b54e-950797bed05d" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam"
Dec 13 06:58:07 crc kubenswrapper[5048]: I1213 06:58:07.437171 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-j46dh"
Dec 13 06:58:07 crc kubenswrapper[5048]: I1213 06:58:07.444127 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-j46dh"]
Dec 13 06:58:07 crc kubenswrapper[5048]: I1213 06:58:07.482006 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/868a604c-3a79-4945-b54e-950797bed05d-ssh-key\") pod \"868a604c-3a79-4945-b54e-950797bed05d\" (UID: \"868a604c-3a79-4945-b54e-950797bed05d\") "
Dec 13 06:58:07 crc kubenswrapper[5048]: I1213 06:58:07.491043 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/868a604c-3a79-4945-b54e-950797bed05d-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "868a604c-3a79-4945-b54e-950797bed05d" (UID: "868a604c-3a79-4945-b54e-950797bed05d"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 06:58:07 crc kubenswrapper[5048]: I1213 06:58:07.583973 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/78eb5a3b-7802-4507-bb35-37cc2e8edb56-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-j46dh\" (UID: \"78eb5a3b-7802-4507-bb35-37cc2e8edb56\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-j46dh"
Dec 13 06:58:07 crc kubenswrapper[5048]: I1213 06:58:07.584057 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/78eb5a3b-7802-4507-bb35-37cc2e8edb56-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-j46dh\" (UID: \"78eb5a3b-7802-4507-bb35-37cc2e8edb56\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-j46dh"
Dec 13 06:58:07 crc kubenswrapper[5048]: I1213 06:58:07.584208 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fpp72\" (UniqueName: \"kubernetes.io/projected/78eb5a3b-7802-4507-bb35-37cc2e8edb56-kube-api-access-fpp72\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-j46dh\" (UID: \"78eb5a3b-7802-4507-bb35-37cc2e8edb56\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-j46dh"
Dec 13 06:58:07 crc kubenswrapper[5048]: I1213 06:58:07.584336 5048 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/868a604c-3a79-4945-b54e-950797bed05d-ssh-key\") on node \"crc\" DevicePath \"\""
Dec 13 06:58:07 crc kubenswrapper[5048]: I1213 06:58:07.685757 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/78eb5a3b-7802-4507-bb35-37cc2e8edb56-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-j46dh\" (UID: \"78eb5a3b-7802-4507-bb35-37cc2e8edb56\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-j46dh"
Dec 13 06:58:07 crc kubenswrapper[5048]: I1213 06:58:07.685832 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/78eb5a3b-7802-4507-bb35-37cc2e8edb56-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-j46dh\" (UID: \"78eb5a3b-7802-4507-bb35-37cc2e8edb56\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-j46dh"
Dec 13 06:58:07 crc kubenswrapper[5048]: I1213 06:58:07.685929 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fpp72\" (UniqueName: \"kubernetes.io/projected/78eb5a3b-7802-4507-bb35-37cc2e8edb56-kube-api-access-fpp72\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-j46dh\" (UID: \"78eb5a3b-7802-4507-bb35-37cc2e8edb56\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-j46dh"
Dec 13 06:58:07 crc kubenswrapper[5048]: I1213 06:58:07.690933 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/78eb5a3b-7802-4507-bb35-37cc2e8edb56-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-j46dh\" (UID: \"78eb5a3b-7802-4507-bb35-37cc2e8edb56\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-j46dh"
Dec 13 06:58:07 crc kubenswrapper[5048]: I1213 06:58:07.691029 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/78eb5a3b-7802-4507-bb35-37cc2e8edb56-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-j46dh\" (UID: \"78eb5a3b-7802-4507-bb35-37cc2e8edb56\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-j46dh"
Dec 13 06:58:07 crc kubenswrapper[5048]: I1213 06:58:07.703013 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fpp72\" (UniqueName: \"kubernetes.io/projected/78eb5a3b-7802-4507-bb35-37cc2e8edb56-kube-api-access-fpp72\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-j46dh\" (UID: \"78eb5a3b-7802-4507-bb35-37cc2e8edb56\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-j46dh"
Dec 13 06:58:07 crc kubenswrapper[5048]: I1213 06:58:07.759918 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-j46dh"
Dec 13 06:58:08 crc kubenswrapper[5048]: I1213 06:58:08.299140 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-j46dh"]
Dec 13 06:58:08 crc kubenswrapper[5048]: I1213 06:58:08.305373 5048 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Dec 13 06:58:08 crc kubenswrapper[5048]: I1213 06:58:08.349371 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-j46dh" event={"ID":"78eb5a3b-7802-4507-bb35-37cc2e8edb56","Type":"ContainerStarted","Data":"db6dff42e064653b90de5d508e8fbf361c2dbdea024be1825504bcfe0e85ab1b"}
Dec 13 06:58:09 crc kubenswrapper[5048]: I1213 06:58:09.362906 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-j46dh" event={"ID":"78eb5a3b-7802-4507-bb35-37cc2e8edb56","Type":"ContainerStarted","Data":"47ca660f909e9c4b43aff4e50d19155115e7470cde8c68325ebd97f13c871d86"}
Dec 13 06:58:09 crc kubenswrapper[5048]: I1213 06:58:09.377995 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-j46dh" podStartSLOduration=1.83753261 podStartE2EDuration="2.377973447s" podCreationTimestamp="2025-12-13 06:58:07 +0000 UTC" firstStartedPulling="2025-12-13 06:58:08.305094237 +0000 UTC m=+1722.171688818" lastFinishedPulling="2025-12-13 06:58:08.845535064 +0000 UTC m=+1722.712129655" observedRunningTime="2025-12-13 06:58:09.375696126 +0000 UTC m=+1723.242290707" watchObservedRunningTime="2025-12-13 06:58:09.377973447 +0000 UTC m=+1723.244568028"
Dec 13 06:58:16 crc kubenswrapper[5048]: I1213 06:58:16.574831 5048 scope.go:117] "RemoveContainer" containerID="c3c3c010eb86b49ee1ff9e8da0dddc32f266e163f2ef609d402b38792e3c4862"
Dec 13 06:58:16 crc kubenswrapper[5048]: E1213 06:58:16.576208 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b"
Dec 13 06:58:27 crc kubenswrapper[5048]: I1213 06:58:27.050136 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-dac3-account-create-update-nrtk8"]
Dec 13 06:58:27 crc kubenswrapper[5048]: I1213 06:58:27.058721 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-dac3-account-create-update-nrtk8"]
Dec 13 06:58:28 crc kubenswrapper[5048]: I1213 06:58:28.039976 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-54a6-account-create-update-fvkg9"]
Dec 13 06:58:28 crc kubenswrapper[5048]: I1213 06:58:28.049586 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-rqc56"]
Dec 13 06:58:28 crc kubenswrapper[5048]: I1213 06:58:28.060247 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-bfzrz"]
Dec 13 06:58:28 crc kubenswrapper[5048]: I1213 06:58:28.071466 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-nztt9"]
Dec 13 06:58:28 crc kubenswrapper[5048]: I1213 06:58:28.081727 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-54a6-account-create-update-fvkg9"]
Dec 13 06:58:28 crc kubenswrapper[5048]: I1213 06:58:28.107131 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-rqc56"]
Dec 13 06:58:28 crc kubenswrapper[5048]: I1213 06:58:28.118340 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-nztt9"]
Dec 13 06:58:28 crc kubenswrapper[5048]: I1213 06:58:28.129258 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-bfzrz"]
Dec 13 06:58:28 crc kubenswrapper[5048]: I1213 06:58:28.140040 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-cb17-account-create-update-rhz4v"]
Dec 13 06:58:28 crc kubenswrapper[5048]: I1213 06:58:28.148564 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-cb17-account-create-update-rhz4v"]
Dec 13 06:58:28 crc kubenswrapper[5048]: I1213 06:58:28.581373 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="19c49aa3-f62c-4c78-a8b7-a6858f7da04e" path="/var/lib/kubelet/pods/19c49aa3-f62c-4c78-a8b7-a6858f7da04e/volumes"
Dec 13 06:58:28 crc kubenswrapper[5048]: I1213 06:58:28.582333 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b80676d-e344-4949-8a27-22381100d354" path="/var/lib/kubelet/pods/9b80676d-e344-4949-8a27-22381100d354/volumes"
Dec 13 06:58:28 crc kubenswrapper[5048]: I1213 06:58:28.583114 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a1efce17-5954-4d64-b35e-8ac14d24e7b0" path="/var/lib/kubelet/pods/a1efce17-5954-4d64-b35e-8ac14d24e7b0/volumes"
Dec 13 06:58:28 crc kubenswrapper[5048]: I1213 06:58:28.583856 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a731241e-7fd1-45cb-9204-e71ff2a62a91" path="/var/lib/kubelet/pods/a731241e-7fd1-45cb-9204-e71ff2a62a91/volumes"
Dec 13 06:58:28 crc kubenswrapper[5048]: I1213 06:58:28.585185 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d5ec0a88-0ac6-437e-8aa2-293ac501fadc" path="/var/lib/kubelet/pods/d5ec0a88-0ac6-437e-8aa2-293ac501fadc/volumes"
Dec 13 06:58:28 crc kubenswrapper[5048]: I1213 06:58:28.585921 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d790a6f6-a1d9-4f9c-84ae-de2358b07262" path="/var/lib/kubelet/pods/d790a6f6-a1d9-4f9c-84ae-de2358b07262/volumes"
Dec 13 06:58:29 crc kubenswrapper[5048]: I1213 06:58:29.566785 5048 scope.go:117] "RemoveContainer" containerID="c3c3c010eb86b49ee1ff9e8da0dddc32f266e163f2ef609d402b38792e3c4862"
Dec 13 06:58:29 crc kubenswrapper[5048]: E1213 06:58:29.567354 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b"
Dec 13 06:58:43 crc kubenswrapper[5048]: I1213 06:58:43.567178 5048 scope.go:117] "RemoveContainer" containerID="c3c3c010eb86b49ee1ff9e8da0dddc32f266e163f2ef609d402b38792e3c4862"
Dec 13 06:58:43 crc kubenswrapper[5048]: E1213 06:58:43.567837 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b"
Dec 13 06:58:51 crc kubenswrapper[5048]: I1213 06:58:51.909293 5048 scope.go:117] "RemoveContainer" containerID="47d1ae9ee992ee9bdd13d9886f9bd8fc67cf382bad934b6083cbef27680f3337"
Dec 13 06:58:51 crc kubenswrapper[5048]: I1213 06:58:51.946556 5048 scope.go:117] "RemoveContainer" containerID="e0387975ec5e573db9d08ec659aef4ce71262e1ea721023bac3f0647ffebce10"
Dec 13 06:58:52 crc kubenswrapper[5048]: I1213 06:58:52.004252 5048 scope.go:117] "RemoveContainer" containerID="2f9aeda042cb1889d878094a98cbcea9dac862690670083b09197057618a41ee"
Dec 13 06:58:52 crc kubenswrapper[5048]: I1213 06:58:52.033333 5048 scope.go:117] "RemoveContainer" containerID="b28c482610ca4bbf122d087c8b8a4e5294f8e089defe8a3a4a0edbc7f099c121"
Dec 13 06:58:52 crc kubenswrapper[5048]: I1213 06:58:52.078958 5048 scope.go:117] "RemoveContainer" containerID="5e3952e75d057ef17b2c6beb8312bdb58d29b83e81bff8533cd0487bdc7b480c"
Dec 13 06:58:52 crc kubenswrapper[5048]: I1213 06:58:52.126751 5048 scope.go:117] "RemoveContainer" containerID="0392c1cbe1fce800c6d0e7030ce49be24f27d534b3da8652ab77cd70f5b9786f"
Dec 13 06:58:56 crc kubenswrapper[5048]: I1213 06:58:56.574687 5048 scope.go:117] "RemoveContainer" containerID="c3c3c010eb86b49ee1ff9e8da0dddc32f266e163f2ef609d402b38792e3c4862"
Dec 13 06:58:56 crc kubenswrapper[5048]: E1213 06:58:56.575376 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b"
Dec 13 06:59:05 crc kubenswrapper[5048]: I1213 06:59:05.047805 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-zwlkk"]
Dec 13 06:59:05 crc kubenswrapper[5048]: I1213 06:59:05.057618 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-c6qr4"]
Dec 13 06:59:05 crc kubenswrapper[5048]: I1213 06:59:05.064999 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-4fa6-account-create-update-wv7m6"]
Dec 13 06:59:05 crc kubenswrapper[5048]: I1213 06:59:05.088529 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-zwlkk"]
Dec 13 06:59:05 crc kubenswrapper[5048]: I1213 06:59:05.088810 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-4fa6-account-create-update-wv7m6"]
Dec 13 06:59:05 crc kubenswrapper[5048]: I1213 06:59:05.097384 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-c6qr4"]
Dec 13 06:59:06 crc kubenswrapper[5048]: I1213 06:59:06.580124 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37e65177-fd1b-4fee-aad0-e089b9e9c47b" path="/var/lib/kubelet/pods/37e65177-fd1b-4fee-aad0-e089b9e9c47b/volumes"
Dec 13 06:59:06 crc kubenswrapper[5048]: I1213 06:59:06.581656 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="56bc6cf2-a689-4bbe-a20e-beda8ebe0165" path="/var/lib/kubelet/pods/56bc6cf2-a689-4bbe-a20e-beda8ebe0165/volumes"
Dec 13 06:59:06 crc kubenswrapper[5048]: I1213 06:59:06.582918 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ef4c1d97-0727-448e-910c-762a2f36ca80" path="/var/lib/kubelet/pods/ef4c1d97-0727-448e-910c-762a2f36ca80/volumes"
Dec 13 06:59:09 crc kubenswrapper[5048]: I1213 06:59:09.041496 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-5aa0-account-create-update-24bj7"]
Dec 13 06:59:09 crc kubenswrapper[5048]: I1213 06:59:09.048306 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-b786-account-create-update-s8j9n"]
Dec 13 06:59:09 crc kubenswrapper[5048]: I1213 06:59:09.055297 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-wmwmb"]
Dec 13 06:59:09 crc kubenswrapper[5048]: I1213 06:59:09.062255 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-wmwmb"]
Dec 13 06:59:09 crc kubenswrapper[5048]: I1213 06:59:09.069218 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-b786-account-create-update-s8j9n"]
Dec 13 06:59:09 crc kubenswrapper[5048]: I1213 06:59:09.075609 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-5aa0-account-create-update-24bj7"]
Dec 13 06:59:10 crc kubenswrapper[5048]: I1213 06:59:10.576698 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0f2d2a25-1fa1-4d52-9ecb-5efe9871ea94" path="/var/lib/kubelet/pods/0f2d2a25-1fa1-4d52-9ecb-5efe9871ea94/volumes"
Dec 13 06:59:10 crc kubenswrapper[5048]: I1213 06:59:10.577543 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="84147fa9-3a4b-467d-a9db-d8c4c3ff66e8" path="/var/lib/kubelet/pods/84147fa9-3a4b-467d-a9db-d8c4c3ff66e8/volumes"
Dec 13 06:59:10 crc kubenswrapper[5048]: I1213 06:59:10.578048 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a786107a-8b19-4bbd-9049-1d4590029976" path="/var/lib/kubelet/pods/a786107a-8b19-4bbd-9049-1d4590029976/volumes"
Dec 13 06:59:11 crc kubenswrapper[5048]: I1213 06:59:11.567286 5048 scope.go:117] "RemoveContainer" containerID="c3c3c010eb86b49ee1ff9e8da0dddc32f266e163f2ef609d402b38792e3c4862"
Dec 13 06:59:11 crc kubenswrapper[5048]: E1213 06:59:11.567596 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b"
Dec 13 06:59:14 crc kubenswrapper[5048]: I1213 06:59:14.041522 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-hgfjd"]
Dec 13 06:59:14 crc kubenswrapper[5048]: I1213 06:59:14.049589 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-hgfjd"]
Dec 13 06:59:14 crc kubenswrapper[5048]: I1213 06:59:14.062395 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-hscms"]
Dec 13 06:59:14 crc kubenswrapper[5048]: I1213 06:59:14.074144 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-hscms"]
Dec 13 06:59:14 crc kubenswrapper[5048]: I1213 06:59:14.580153 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f13f74e-d8ac-4cd4-a6f6-4797086d1f82" path="/var/lib/kubelet/pods/3f13f74e-d8ac-4cd4-a6f6-4797086d1f82/volumes"
Dec 13 06:59:14 crc kubenswrapper[5048]: I1213 06:59:14.581419 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="91aa9cb1-7cd0-4cd3-9918-460b5d976ab7" path="/var/lib/kubelet/pods/91aa9cb1-7cd0-4cd3-9918-460b5d976ab7/volumes"
Dec 13 06:59:23 crc kubenswrapper[5048]: I1213 06:59:23.566912 5048 scope.go:117] "RemoveContainer" containerID="c3c3c010eb86b49ee1ff9e8da0dddc32f266e163f2ef609d402b38792e3c4862"
Dec 13 06:59:23 crc kubenswrapper[5048]: E1213 06:59:23.567776 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b"
Dec 13 06:59:35 crc kubenswrapper[5048]: I1213 06:59:35.467501 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-h924v"]
Dec 13 06:59:35 crc kubenswrapper[5048]: I1213 06:59:35.470034 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-h924v"
Dec 13 06:59:35 crc kubenswrapper[5048]: I1213 06:59:35.480549 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-h924v"]
Dec 13 06:59:35 crc kubenswrapper[5048]: I1213 06:59:35.574760 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/645f2702-d44d-48c7-93e6-422b82cc3eb3-catalog-content\") pod \"redhat-operators-h924v\" (UID: \"645f2702-d44d-48c7-93e6-422b82cc3eb3\") " pod="openshift-marketplace/redhat-operators-h924v"
Dec 13 06:59:35 crc kubenswrapper[5048]: I1213 06:59:35.575005 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t7kt4\" (UniqueName: \"kubernetes.io/projected/645f2702-d44d-48c7-93e6-422b82cc3eb3-kube-api-access-t7kt4\") pod \"redhat-operators-h924v\" (UID: \"645f2702-d44d-48c7-93e6-422b82cc3eb3\") " pod="openshift-marketplace/redhat-operators-h924v"
Dec 13 06:59:35 crc kubenswrapper[5048]: I1213 06:59:35.575256 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/645f2702-d44d-48c7-93e6-422b82cc3eb3-utilities\") pod \"redhat-operators-h924v\" (UID: \"645f2702-d44d-48c7-93e6-422b82cc3eb3\") " pod="openshift-marketplace/redhat-operators-h924v"
Dec 13 06:59:35 crc kubenswrapper[5048]: I1213 06:59:35.677066 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t7kt4\" (UniqueName: \"kubernetes.io/projected/645f2702-d44d-48c7-93e6-422b82cc3eb3-kube-api-access-t7kt4\") pod \"redhat-operators-h924v\" (UID: \"645f2702-d44d-48c7-93e6-422b82cc3eb3\") " pod="openshift-marketplace/redhat-operators-h924v"
Dec 13 06:59:35 crc kubenswrapper[5048]: I1213 06:59:35.677194 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/645f2702-d44d-48c7-93e6-422b82cc3eb3-utilities\") pod \"redhat-operators-h924v\" (UID: \"645f2702-d44d-48c7-93e6-422b82cc3eb3\") " pod="openshift-marketplace/redhat-operators-h924v"
Dec 13 06:59:35 crc kubenswrapper[5048]: I1213 06:59:35.677228 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/645f2702-d44d-48c7-93e6-422b82cc3eb3-catalog-content\") pod \"redhat-operators-h924v\" (UID: \"645f2702-d44d-48c7-93e6-422b82cc3eb3\") " pod="openshift-marketplace/redhat-operators-h924v"
Dec 13 06:59:35 crc kubenswrapper[5048]: I1213 06:59:35.677744 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/645f2702-d44d-48c7-93e6-422b82cc3eb3-utilities\") pod \"redhat-operators-h924v\" (UID: \"645f2702-d44d-48c7-93e6-422b82cc3eb3\") " pod="openshift-marketplace/redhat-operators-h924v"
Dec 13 06:59:35 crc kubenswrapper[5048]: I1213 06:59:35.677938 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/645f2702-d44d-48c7-93e6-422b82cc3eb3-catalog-content\") pod \"redhat-operators-h924v\" (UID: \"645f2702-d44d-48c7-93e6-422b82cc3eb3\") " pod="openshift-marketplace/redhat-operators-h924v"
Dec 13 06:59:35 crc kubenswrapper[5048]: I1213 06:59:35.707535 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t7kt4\" (UniqueName: \"kubernetes.io/projected/645f2702-d44d-48c7-93e6-422b82cc3eb3-kube-api-access-t7kt4\") pod \"redhat-operators-h924v\" (UID: \"645f2702-d44d-48c7-93e6-422b82cc3eb3\") " pod="openshift-marketplace/redhat-operators-h924v"
Dec 13 06:59:35 crc kubenswrapper[5048]: I1213 06:59:35.792648 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-h924v"
Dec 13 06:59:36 crc kubenswrapper[5048]: I1213 06:59:36.276054 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-h924v"]
Dec 13 06:59:36 crc kubenswrapper[5048]: W1213 06:59:36.279040 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod645f2702_d44d_48c7_93e6_422b82cc3eb3.slice/crio-1f880638d8a10c77c50c4d5e1454a95d9c0b5584ec7be0014f01f80c95fd4b84 WatchSource:0}: Error finding container 1f880638d8a10c77c50c4d5e1454a95d9c0b5584ec7be0014f01f80c95fd4b84: Status 404 returned error can't find the container with id 1f880638d8a10c77c50c4d5e1454a95d9c0b5584ec7be0014f01f80c95fd4b84
Dec 13 06:59:36 crc kubenswrapper[5048]: I1213 06:59:36.468853 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-bpcbd"]
Dec 13 06:59:36 crc kubenswrapper[5048]: I1213 06:59:36.471336 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bpcbd"
Dec 13 06:59:36 crc kubenswrapper[5048]: I1213 06:59:36.478190 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bpcbd"]
Dec 13 06:59:36 crc kubenswrapper[5048]: I1213 06:59:36.513863 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f03b25c0-0fb6-42b4-84d9-2fdf4498170e-catalog-content\") pod \"certified-operators-bpcbd\" (UID: \"f03b25c0-0fb6-42b4-84d9-2fdf4498170e\") " pod="openshift-marketplace/certified-operators-bpcbd"
Dec 13 06:59:36 crc kubenswrapper[5048]: I1213 06:59:36.513937 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mhv8t\" (UniqueName: \"kubernetes.io/projected/f03b25c0-0fb6-42b4-84d9-2fdf4498170e-kube-api-access-mhv8t\") pod \"certified-operators-bpcbd\" (UID: \"f03b25c0-0fb6-42b4-84d9-2fdf4498170e\") " pod="openshift-marketplace/certified-operators-bpcbd"
Dec 13 06:59:36 crc kubenswrapper[5048]: I1213 06:59:36.513974 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f03b25c0-0fb6-42b4-84d9-2fdf4498170e-utilities\") pod \"certified-operators-bpcbd\" (UID: \"f03b25c0-0fb6-42b4-84d9-2fdf4498170e\") " pod="openshift-marketplace/certified-operators-bpcbd"
Dec 13 06:59:36 crc kubenswrapper[5048]: I1213 06:59:36.615385 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f03b25c0-0fb6-42b4-84d9-2fdf4498170e-catalog-content\") pod \"certified-operators-bpcbd\" (UID: \"f03b25c0-0fb6-42b4-84d9-2fdf4498170e\") " pod="openshift-marketplace/certified-operators-bpcbd"
Dec 13 06:59:36 crc kubenswrapper[5048]: I1213 06:59:36.615460 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mhv8t\" (UniqueName: \"kubernetes.io/projected/f03b25c0-0fb6-42b4-84d9-2fdf4498170e-kube-api-access-mhv8t\") pod \"certified-operators-bpcbd\" (UID: \"f03b25c0-0fb6-42b4-84d9-2fdf4498170e\") " pod="openshift-marketplace/certified-operators-bpcbd"
Dec 13 06:59:36 crc kubenswrapper[5048]: I1213 06:59:36.615490 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f03b25c0-0fb6-42b4-84d9-2fdf4498170e-utilities\") pod \"certified-operators-bpcbd\" (UID: \"f03b25c0-0fb6-42b4-84d9-2fdf4498170e\") " pod="openshift-marketplace/certified-operators-bpcbd"
Dec 13 06:59:36 crc kubenswrapper[5048]: I1213 06:59:36.616024 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f03b25c0-0fb6-42b4-84d9-2fdf4498170e-utilities\") pod \"certified-operators-bpcbd\" (UID: \"f03b25c0-0fb6-42b4-84d9-2fdf4498170e\") " pod="openshift-marketplace/certified-operators-bpcbd"
Dec 13 06:59:36 crc kubenswrapper[5048]: I1213 06:59:36.616230 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f03b25c0-0fb6-42b4-84d9-2fdf4498170e-catalog-content\") pod \"certified-operators-bpcbd\" (UID: \"f03b25c0-0fb6-42b4-84d9-2fdf4498170e\") " pod="openshift-marketplace/certified-operators-bpcbd"
Dec 13 06:59:36 crc kubenswrapper[5048]: I1213 06:59:36.642305 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mhv8t\" (UniqueName: \"kubernetes.io/projected/f03b25c0-0fb6-42b4-84d9-2fdf4498170e-kube-api-access-mhv8t\") pod \"certified-operators-bpcbd\" (UID: \"f03b25c0-0fb6-42b4-84d9-2fdf4498170e\") " pod="openshift-marketplace/certified-operators-bpcbd"
Dec 13 06:59:36 crc kubenswrapper[5048]: I1213 06:59:36.715288 5048 generic.go:334] "Generic (PLEG): container finished" podID="645f2702-d44d-48c7-93e6-422b82cc3eb3" containerID="d2f5796484f9da2329169b1fd0b26d104e18e06271fd3251b6cd55d3fbc80790" exitCode=0
Dec 13 06:59:36 crc kubenswrapper[5048]: I1213 06:59:36.715551 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h924v" event={"ID":"645f2702-d44d-48c7-93e6-422b82cc3eb3","Type":"ContainerDied","Data":"d2f5796484f9da2329169b1fd0b26d104e18e06271fd3251b6cd55d3fbc80790"}
Dec 13 06:59:36 crc kubenswrapper[5048]: I1213 06:59:36.715576 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h924v" event={"ID":"645f2702-d44d-48c7-93e6-422b82cc3eb3","Type":"ContainerStarted","Data":"1f880638d8a10c77c50c4d5e1454a95d9c0b5584ec7be0014f01f80c95fd4b84"}
Dec 13 06:59:36 crc kubenswrapper[5048]: I1213 06:59:36.835115 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bpcbd"
Dec 13 06:59:37 crc kubenswrapper[5048]: I1213 06:59:37.285250 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bpcbd"]
Dec 13 06:59:37 crc kubenswrapper[5048]: I1213 06:59:37.727071 5048 generic.go:334] "Generic (PLEG): container finished" podID="f03b25c0-0fb6-42b4-84d9-2fdf4498170e" containerID="4f2abdef929ec72e7d1b6e95fa0434bb81921826a40089210f49527acf6e850d" exitCode=0
Dec 13 06:59:37 crc kubenswrapper[5048]: I1213 06:59:37.727141 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bpcbd" event={"ID":"f03b25c0-0fb6-42b4-84d9-2fdf4498170e","Type":"ContainerDied","Data":"4f2abdef929ec72e7d1b6e95fa0434bb81921826a40089210f49527acf6e850d"}
Dec 13 06:59:37 crc kubenswrapper[5048]: I1213 06:59:37.727612 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bpcbd" event={"ID":"f03b25c0-0fb6-42b4-84d9-2fdf4498170e","Type":"ContainerStarted","Data":"8e13cedac378b652257daf8ce2a427f6064ca73a2477110f35bc9411fa54de98"}
Dec 13 06:59:38 crc kubenswrapper[5048]: I1213 06:59:38.567419 5048 scope.go:117] "RemoveContainer" containerID="c3c3c010eb86b49ee1ff9e8da0dddc32f266e163f2ef609d402b38792e3c4862"
Dec 13 06:59:38 crc kubenswrapper[5048]: E1213 06:59:38.567721 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b"
Dec 13 06:59:38 crc kubenswrapper[5048]: I1213 06:59:38.740571 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h924v" event={"ID":"645f2702-d44d-48c7-93e6-422b82cc3eb3","Type":"ContainerStarted","Data":"41181a726270c709cb862a71f0f599e4359c2d2cd1a4b470a89902360bc5cf4e"}
Dec 13 06:59:39 crc kubenswrapper[5048]: I1213 06:59:39.759132 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bpcbd" event={"ID":"f03b25c0-0fb6-42b4-84d9-2fdf4498170e","Type":"ContainerStarted","Data":"87e8dec7a48f06e6f90ff84797ecf65779d6d73578fa0c521934a4aceee4fcd6"}
Dec 13 06:59:40 crc kubenswrapper[5048]: I1213 06:59:40.770931 5048 generic.go:334] "Generic (PLEG): container finished" podID="645f2702-d44d-48c7-93e6-422b82cc3eb3" containerID="41181a726270c709cb862a71f0f599e4359c2d2cd1a4b470a89902360bc5cf4e" exitCode=0
Dec 13 06:59:40 crc kubenswrapper[5048]: I1213 06:59:40.771012 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h924v" event={"ID":"645f2702-d44d-48c7-93e6-422b82cc3eb3","Type":"ContainerDied","Data":"41181a726270c709cb862a71f0f599e4359c2d2cd1a4b470a89902360bc5cf4e"}
Dec 13 06:59:42 crc kubenswrapper[5048]: I1213 06:59:42.789411 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h924v" event={"ID":"645f2702-d44d-48c7-93e6-422b82cc3eb3","Type":"ContainerStarted","Data":"acd5395074f03239d831a1cf05e52d75e4e0800b8f8cfdc7d1189f9df1260b51"}
Dec 13 06:59:42 crc kubenswrapper[5048]: I1213 06:59:42.793214 5048 generic.go:334] "Generic (PLEG): container finished" podID="f03b25c0-0fb6-42b4-84d9-2fdf4498170e"
containerID="87e8dec7a48f06e6f90ff84797ecf65779d6d73578fa0c521934a4aceee4fcd6" exitCode=0 Dec 13 06:59:42 crc kubenswrapper[5048]: I1213 06:59:42.793267 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bpcbd" event={"ID":"f03b25c0-0fb6-42b4-84d9-2fdf4498170e","Type":"ContainerDied","Data":"87e8dec7a48f06e6f90ff84797ecf65779d6d73578fa0c521934a4aceee4fcd6"} Dec 13 06:59:42 crc kubenswrapper[5048]: I1213 06:59:42.823622 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-h924v" podStartSLOduration=2.4276473149999998 podStartE2EDuration="7.823602953s" podCreationTimestamp="2025-12-13 06:59:35 +0000 UTC" firstStartedPulling="2025-12-13 06:59:36.718427554 +0000 UTC m=+1810.585022135" lastFinishedPulling="2025-12-13 06:59:42.114383172 +0000 UTC m=+1815.980977773" observedRunningTime="2025-12-13 06:59:42.816074012 +0000 UTC m=+1816.682668623" watchObservedRunningTime="2025-12-13 06:59:42.823602953 +0000 UTC m=+1816.690197544" Dec 13 06:59:43 crc kubenswrapper[5048]: I1213 06:59:43.805332 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bpcbd" event={"ID":"f03b25c0-0fb6-42b4-84d9-2fdf4498170e","Type":"ContainerStarted","Data":"8dde5af7779d318c2382a16f326be48c08ceff5ede7a8303c38c1451d16ae093"} Dec 13 06:59:43 crc kubenswrapper[5048]: I1213 06:59:43.829704 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-bpcbd" podStartSLOduration=2.076148561 podStartE2EDuration="7.829683203s" podCreationTimestamp="2025-12-13 06:59:36 +0000 UTC" firstStartedPulling="2025-12-13 06:59:37.729425886 +0000 UTC m=+1811.596020477" lastFinishedPulling="2025-12-13 06:59:43.482960538 +0000 UTC m=+1817.349555119" observedRunningTime="2025-12-13 06:59:43.822151121 +0000 UTC m=+1817.688745702" watchObservedRunningTime="2025-12-13 06:59:43.829683203 +0000 UTC m=+1817.696277804" Dec 13 06:59:45 crc kubenswrapper[5048]: I1213 06:59:45.793266 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-h924v" Dec 13 06:59:45 crc kubenswrapper[5048]: I1213 06:59:45.793568 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-h924v" Dec 13 06:59:46 crc kubenswrapper[5048]: I1213 06:59:46.836179 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-bpcbd" Dec 13 06:59:46 crc kubenswrapper[5048]: I1213 06:59:46.836577 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-bpcbd" Dec 13 06:59:46 crc kubenswrapper[5048]: I1213 06:59:46.860515 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h924v" podUID="645f2702-d44d-48c7-93e6-422b82cc3eb3" containerName="registry-server" probeResult="failure" output=< Dec 13 06:59:46 crc kubenswrapper[5048]: timeout: failed to connect service ":50051" within 1s Dec 13 06:59:46 crc kubenswrapper[5048]: > Dec 13 06:59:46 crc kubenswrapper[5048]: I1213 06:59:46.879390 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-bpcbd" Dec 13 06:59:48 crc kubenswrapper[5048]: I1213 06:59:48.039312 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-7ksgw"] Dec 13 06:59:48 crc 
kubenswrapper[5048]: I1213 06:59:48.048912 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-7ksgw"] Dec 13 06:59:48 crc kubenswrapper[5048]: I1213 06:59:48.579032 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="34ff9369-df92-416e-a391-18b362cd491f" path="/var/lib/kubelet/pods/34ff9369-df92-416e-a391-18b362cd491f/volumes" Dec 13 06:59:52 crc kubenswrapper[5048]: I1213 06:59:52.302793 5048 scope.go:117] "RemoveContainer" containerID="35c9b8fac43d8dd833b06d4173252abc50df46149293138c6f6b26d139e5980f" Dec 13 06:59:52 crc kubenswrapper[5048]: I1213 06:59:52.331953 5048 scope.go:117] "RemoveContainer" containerID="a2c4f6a3fb24cf8b9fff1bda7e6f4164423ce94e4e26cfe3cb7c82380aab60b1" Dec 13 06:59:52 crc kubenswrapper[5048]: I1213 06:59:52.379157 5048 scope.go:117] "RemoveContainer" containerID="3070acb24c92f47da73bd14c2957f4daa8dc7e966234b03154e73a3445c788b1" Dec 13 06:59:52 crc kubenswrapper[5048]: I1213 06:59:52.424741 5048 scope.go:117] "RemoveContainer" containerID="7bc793e1a64b123b2edbf3651f7addc018f948f75e8ca0093d09fc5b99a1b465" Dec 13 06:59:52 crc kubenswrapper[5048]: I1213 06:59:52.468341 5048 scope.go:117] "RemoveContainer" containerID="91f972f3ba413690e89f2f05548d1680b8d74a87443549898fe18c686de2c727" Dec 13 06:59:52 crc kubenswrapper[5048]: I1213 06:59:52.567058 5048 scope.go:117] "RemoveContainer" containerID="c3c3c010eb86b49ee1ff9e8da0dddc32f266e163f2ef609d402b38792e3c4862" Dec 13 06:59:52 crc kubenswrapper[5048]: E1213 06:59:52.567354 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 06:59:52 crc kubenswrapper[5048]: I1213 06:59:52.567938 5048 scope.go:117] "RemoveContainer" containerID="2d144f7ad2976b897eb79a7c1f0dda93d8476a625581e9c46d6e945dc2365ac3" Dec 13 06:59:52 crc kubenswrapper[5048]: I1213 06:59:52.620899 5048 scope.go:117] "RemoveContainer" containerID="82442e432e821a4d252f2716b7c9b49086a27d00fb2c25035a6b3287adb9cee8" Dec 13 06:59:52 crc kubenswrapper[5048]: I1213 06:59:52.672749 5048 scope.go:117] "RemoveContainer" containerID="27fbcde69ec116cfdf5baaf699c29cca5ef0f26fb9d19add532f43b2389a8475" Dec 13 06:59:52 crc kubenswrapper[5048]: I1213 06:59:52.695141 5048 scope.go:117] "RemoveContainer" containerID="ef7f01bdd05b3ad43ec1c9d4430e36471ec8d83f92a657215c6eef5ee1d95ccd" Dec 13 06:59:54 crc kubenswrapper[5048]: I1213 06:59:54.029197 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-75nl7"] Dec 13 06:59:54 crc kubenswrapper[5048]: I1213 06:59:54.036746 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-75nl7"] Dec 13 06:59:54 crc kubenswrapper[5048]: I1213 06:59:54.641542 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e351b290-4e5b-496d-94be-545f01ae8e15" path="/var/lib/kubelet/pods/e351b290-4e5b-496d-94be-545f01ae8e15/volumes" Dec 13 06:59:55 crc kubenswrapper[5048]: I1213 06:59:55.855307 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-h924v" Dec 13 06:59:55 crc kubenswrapper[5048]: I1213 06:59:55.904768 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openshift-marketplace/redhat-operators-h924v" Dec 13 06:59:55 crc kubenswrapper[5048]: I1213 06:59:55.929140 5048 generic.go:334] "Generic (PLEG): container finished" podID="78eb5a3b-7802-4507-bb35-37cc2e8edb56" containerID="47ca660f909e9c4b43aff4e50d19155115e7470cde8c68325ebd97f13c871d86" exitCode=0 Dec 13 06:59:55 crc kubenswrapper[5048]: I1213 06:59:55.930039 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-j46dh" event={"ID":"78eb5a3b-7802-4507-bb35-37cc2e8edb56","Type":"ContainerDied","Data":"47ca660f909e9c4b43aff4e50d19155115e7470cde8c68325ebd97f13c871d86"} Dec 13 06:59:56 crc kubenswrapper[5048]: I1213 06:59:56.696109 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-h924v"] Dec 13 06:59:56 crc kubenswrapper[5048]: I1213 06:59:56.893361 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-bpcbd" Dec 13 06:59:56 crc kubenswrapper[5048]: I1213 06:59:56.940740 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-h924v" podUID="645f2702-d44d-48c7-93e6-422b82cc3eb3" containerName="registry-server" containerID="cri-o://acd5395074f03239d831a1cf05e52d75e4e0800b8f8cfdc7d1189f9df1260b51" gracePeriod=2 Dec 13 06:59:57 crc kubenswrapper[5048]: I1213 06:59:57.423045 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-j46dh" Dec 13 06:59:57 crc kubenswrapper[5048]: I1213 06:59:57.432352 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-h924v" Dec 13 06:59:57 crc kubenswrapper[5048]: I1213 06:59:57.443127 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/78eb5a3b-7802-4507-bb35-37cc2e8edb56-inventory\") pod \"78eb5a3b-7802-4507-bb35-37cc2e8edb56\" (UID: \"78eb5a3b-7802-4507-bb35-37cc2e8edb56\") " Dec 13 06:59:57 crc kubenswrapper[5048]: I1213 06:59:57.443275 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/645f2702-d44d-48c7-93e6-422b82cc3eb3-utilities\") pod \"645f2702-d44d-48c7-93e6-422b82cc3eb3\" (UID: \"645f2702-d44d-48c7-93e6-422b82cc3eb3\") " Dec 13 06:59:57 crc kubenswrapper[5048]: I1213 06:59:57.443330 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t7kt4\" (UniqueName: \"kubernetes.io/projected/645f2702-d44d-48c7-93e6-422b82cc3eb3-kube-api-access-t7kt4\") pod \"645f2702-d44d-48c7-93e6-422b82cc3eb3\" (UID: \"645f2702-d44d-48c7-93e6-422b82cc3eb3\") " Dec 13 06:59:57 crc kubenswrapper[5048]: I1213 06:59:57.443460 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/78eb5a3b-7802-4507-bb35-37cc2e8edb56-ssh-key\") pod \"78eb5a3b-7802-4507-bb35-37cc2e8edb56\" (UID: \"78eb5a3b-7802-4507-bb35-37cc2e8edb56\") " Dec 13 06:59:57 crc kubenswrapper[5048]: I1213 06:59:57.443620 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/645f2702-d44d-48c7-93e6-422b82cc3eb3-catalog-content\") pod \"645f2702-d44d-48c7-93e6-422b82cc3eb3\" (UID: \"645f2702-d44d-48c7-93e6-422b82cc3eb3\") " Dec 
13 06:59:57 crc kubenswrapper[5048]: I1213 06:59:57.443664 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fpp72\" (UniqueName: \"kubernetes.io/projected/78eb5a3b-7802-4507-bb35-37cc2e8edb56-kube-api-access-fpp72\") pod \"78eb5a3b-7802-4507-bb35-37cc2e8edb56\" (UID: \"78eb5a3b-7802-4507-bb35-37cc2e8edb56\") " Dec 13 06:59:57 crc kubenswrapper[5048]: I1213 06:59:57.444876 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/645f2702-d44d-48c7-93e6-422b82cc3eb3-utilities" (OuterVolumeSpecName: "utilities") pod "645f2702-d44d-48c7-93e6-422b82cc3eb3" (UID: "645f2702-d44d-48c7-93e6-422b82cc3eb3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:59:57 crc kubenswrapper[5048]: I1213 06:59:57.445136 5048 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/645f2702-d44d-48c7-93e6-422b82cc3eb3-utilities\") on node \"crc\" DevicePath \"\"" Dec 13 06:59:57 crc kubenswrapper[5048]: I1213 06:59:57.450818 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/645f2702-d44d-48c7-93e6-422b82cc3eb3-kube-api-access-t7kt4" (OuterVolumeSpecName: "kube-api-access-t7kt4") pod "645f2702-d44d-48c7-93e6-422b82cc3eb3" (UID: "645f2702-d44d-48c7-93e6-422b82cc3eb3"). InnerVolumeSpecName "kube-api-access-t7kt4". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:59:57 crc kubenswrapper[5048]: I1213 06:59:57.451330 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/78eb5a3b-7802-4507-bb35-37cc2e8edb56-kube-api-access-fpp72" (OuterVolumeSpecName: "kube-api-access-fpp72") pod "78eb5a3b-7802-4507-bb35-37cc2e8edb56" (UID: "78eb5a3b-7802-4507-bb35-37cc2e8edb56"). InnerVolumeSpecName "kube-api-access-fpp72". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:59:57 crc kubenswrapper[5048]: I1213 06:59:57.485630 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78eb5a3b-7802-4507-bb35-37cc2e8edb56-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "78eb5a3b-7802-4507-bb35-37cc2e8edb56" (UID: "78eb5a3b-7802-4507-bb35-37cc2e8edb56"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:59:57 crc kubenswrapper[5048]: I1213 06:59:57.502522 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78eb5a3b-7802-4507-bb35-37cc2e8edb56-inventory" (OuterVolumeSpecName: "inventory") pod "78eb5a3b-7802-4507-bb35-37cc2e8edb56" (UID: "78eb5a3b-7802-4507-bb35-37cc2e8edb56"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 06:59:57 crc kubenswrapper[5048]: I1213 06:59:57.546297 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t7kt4\" (UniqueName: \"kubernetes.io/projected/645f2702-d44d-48c7-93e6-422b82cc3eb3-kube-api-access-t7kt4\") on node \"crc\" DevicePath \"\"" Dec 13 06:59:57 crc kubenswrapper[5048]: I1213 06:59:57.546332 5048 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/78eb5a3b-7802-4507-bb35-37cc2e8edb56-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 13 06:59:57 crc kubenswrapper[5048]: I1213 06:59:57.546342 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fpp72\" (UniqueName: \"kubernetes.io/projected/78eb5a3b-7802-4507-bb35-37cc2e8edb56-kube-api-access-fpp72\") on node \"crc\" DevicePath \"\"" Dec 13 06:59:57 crc kubenswrapper[5048]: I1213 06:59:57.546351 5048 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/78eb5a3b-7802-4507-bb35-37cc2e8edb56-inventory\") on node \"crc\" DevicePath \"\"" Dec 13 06:59:57 crc kubenswrapper[5048]: I1213 06:59:57.578025 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/645f2702-d44d-48c7-93e6-422b82cc3eb3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "645f2702-d44d-48c7-93e6-422b82cc3eb3" (UID: "645f2702-d44d-48c7-93e6-422b82cc3eb3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:59:57 crc kubenswrapper[5048]: I1213 06:59:57.648310 5048 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/645f2702-d44d-48c7-93e6-422b82cc3eb3-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 13 06:59:57 crc kubenswrapper[5048]: I1213 06:59:57.953123 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-j46dh" event={"ID":"78eb5a3b-7802-4507-bb35-37cc2e8edb56","Type":"ContainerDied","Data":"db6dff42e064653b90de5d508e8fbf361c2dbdea024be1825504bcfe0e85ab1b"} Dec 13 06:59:57 crc kubenswrapper[5048]: I1213 06:59:57.953173 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-j46dh" Dec 13 06:59:57 crc kubenswrapper[5048]: I1213 06:59:57.953187 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="db6dff42e064653b90de5d508e8fbf361c2dbdea024be1825504bcfe0e85ab1b" Dec 13 06:59:57 crc kubenswrapper[5048]: I1213 06:59:57.957134 5048 generic.go:334] "Generic (PLEG): container finished" podID="645f2702-d44d-48c7-93e6-422b82cc3eb3" containerID="acd5395074f03239d831a1cf05e52d75e4e0800b8f8cfdc7d1189f9df1260b51" exitCode=0 Dec 13 06:59:57 crc kubenswrapper[5048]: I1213 06:59:57.957199 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h924v" event={"ID":"645f2702-d44d-48c7-93e6-422b82cc3eb3","Type":"ContainerDied","Data":"acd5395074f03239d831a1cf05e52d75e4e0800b8f8cfdc7d1189f9df1260b51"} Dec 13 06:59:57 crc kubenswrapper[5048]: I1213 06:59:57.957273 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h924v" event={"ID":"645f2702-d44d-48c7-93e6-422b82cc3eb3","Type":"ContainerDied","Data":"1f880638d8a10c77c50c4d5e1454a95d9c0b5584ec7be0014f01f80c95fd4b84"} Dec 13 06:59:57 crc kubenswrapper[5048]: I1213 06:59:57.957216 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-h924v" Dec 13 06:59:57 crc kubenswrapper[5048]: I1213 06:59:57.957306 5048 scope.go:117] "RemoveContainer" containerID="acd5395074f03239d831a1cf05e52d75e4e0800b8f8cfdc7d1189f9df1260b51" Dec 13 06:59:58 crc kubenswrapper[5048]: I1213 06:59:58.074253 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-h924v"] Dec 13 06:59:58 crc kubenswrapper[5048]: I1213 06:59:58.088633 5048 scope.go:117] "RemoveContainer" containerID="41181a726270c709cb862a71f0f599e4359c2d2cd1a4b470a89902360bc5cf4e" Dec 13 06:59:58 crc kubenswrapper[5048]: I1213 06:59:58.089311 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-h924v"] Dec 13 06:59:58 crc kubenswrapper[5048]: I1213 06:59:58.121875 5048 scope.go:117] "RemoveContainer" containerID="d2f5796484f9da2329169b1fd0b26d104e18e06271fd3251b6cd55d3fbc80790" Dec 13 06:59:58 crc kubenswrapper[5048]: I1213 06:59:58.143463 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vhsrc"] Dec 13 06:59:58 crc kubenswrapper[5048]: E1213 06:59:58.143923 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78eb5a3b-7802-4507-bb35-37cc2e8edb56" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 13 06:59:58 crc kubenswrapper[5048]: I1213 06:59:58.143951 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="78eb5a3b-7802-4507-bb35-37cc2e8edb56" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 13 06:59:58 crc kubenswrapper[5048]: E1213 06:59:58.143966 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="645f2702-d44d-48c7-93e6-422b82cc3eb3" containerName="registry-server" Dec 13 06:59:58 crc kubenswrapper[5048]: I1213 06:59:58.143976 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="645f2702-d44d-48c7-93e6-422b82cc3eb3" containerName="registry-server" Dec 13 06:59:58 crc kubenswrapper[5048]: E1213 06:59:58.143997 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="645f2702-d44d-48c7-93e6-422b82cc3eb3" containerName="extract-utilities" Dec 13 
06:59:58 crc kubenswrapper[5048]: I1213 06:59:58.144006 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="645f2702-d44d-48c7-93e6-422b82cc3eb3" containerName="extract-utilities" Dec 13 06:59:58 crc kubenswrapper[5048]: E1213 06:59:58.144033 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="645f2702-d44d-48c7-93e6-422b82cc3eb3" containerName="extract-content" Dec 13 06:59:58 crc kubenswrapper[5048]: I1213 06:59:58.144041 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="645f2702-d44d-48c7-93e6-422b82cc3eb3" containerName="extract-content" Dec 13 06:59:58 crc kubenswrapper[5048]: I1213 06:59:58.144257 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="78eb5a3b-7802-4507-bb35-37cc2e8edb56" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 13 06:59:58 crc kubenswrapper[5048]: I1213 06:59:58.144284 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="645f2702-d44d-48c7-93e6-422b82cc3eb3" containerName="registry-server" Dec 13 06:59:58 crc kubenswrapper[5048]: I1213 06:59:58.146623 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vhsrc" Dec 13 06:59:58 crc kubenswrapper[5048]: I1213 06:59:58.148922 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 13 06:59:58 crc kubenswrapper[5048]: I1213 06:59:58.149242 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 13 06:59:58 crc kubenswrapper[5048]: I1213 06:59:58.149514 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hgp7p" Dec 13 06:59:58 crc kubenswrapper[5048]: I1213 06:59:58.149682 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 13 06:59:58 crc kubenswrapper[5048]: I1213 06:59:58.157284 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vhsrc"] Dec 13 06:59:58 crc kubenswrapper[5048]: I1213 06:59:58.160501 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j7688\" (UniqueName: \"kubernetes.io/projected/7df2d78c-502d-4d7c-9233-cf01992cab77-kube-api-access-j7688\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-vhsrc\" (UID: \"7df2d78c-502d-4d7c-9233-cf01992cab77\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vhsrc" Dec 13 06:59:58 crc kubenswrapper[5048]: I1213 06:59:58.160603 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7df2d78c-502d-4d7c-9233-cf01992cab77-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-vhsrc\" (UID: \"7df2d78c-502d-4d7c-9233-cf01992cab77\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vhsrc" Dec 13 06:59:58 crc kubenswrapper[5048]: I1213 06:59:58.160646 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7df2d78c-502d-4d7c-9233-cf01992cab77-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-vhsrc\" (UID: \"7df2d78c-502d-4d7c-9233-cf01992cab77\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vhsrc" Dec 13 
06:59:58 crc kubenswrapper[5048]: I1213 06:59:58.184030 5048 scope.go:117] "RemoveContainer" containerID="acd5395074f03239d831a1cf05e52d75e4e0800b8f8cfdc7d1189f9df1260b51" Dec 13 06:59:58 crc kubenswrapper[5048]: E1213 06:59:58.184999 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"acd5395074f03239d831a1cf05e52d75e4e0800b8f8cfdc7d1189f9df1260b51\": container with ID starting with acd5395074f03239d831a1cf05e52d75e4e0800b8f8cfdc7d1189f9df1260b51 not found: ID does not exist" containerID="acd5395074f03239d831a1cf05e52d75e4e0800b8f8cfdc7d1189f9df1260b51" Dec 13 06:59:58 crc kubenswrapper[5048]: I1213 06:59:58.185045 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"acd5395074f03239d831a1cf05e52d75e4e0800b8f8cfdc7d1189f9df1260b51"} err="failed to get container status \"acd5395074f03239d831a1cf05e52d75e4e0800b8f8cfdc7d1189f9df1260b51\": rpc error: code = NotFound desc = could not find container \"acd5395074f03239d831a1cf05e52d75e4e0800b8f8cfdc7d1189f9df1260b51\": container with ID starting with acd5395074f03239d831a1cf05e52d75e4e0800b8f8cfdc7d1189f9df1260b51 not found: ID does not exist" Dec 13 06:59:58 crc kubenswrapper[5048]: I1213 06:59:58.185077 5048 scope.go:117] "RemoveContainer" containerID="41181a726270c709cb862a71f0f599e4359c2d2cd1a4b470a89902360bc5cf4e" Dec 13 06:59:58 crc kubenswrapper[5048]: E1213 06:59:58.185534 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"41181a726270c709cb862a71f0f599e4359c2d2cd1a4b470a89902360bc5cf4e\": container with ID starting with 41181a726270c709cb862a71f0f599e4359c2d2cd1a4b470a89902360bc5cf4e not found: ID does not exist" containerID="41181a726270c709cb862a71f0f599e4359c2d2cd1a4b470a89902360bc5cf4e" Dec 13 06:59:58 crc kubenswrapper[5048]: I1213 06:59:58.185573 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"41181a726270c709cb862a71f0f599e4359c2d2cd1a4b470a89902360bc5cf4e"} err="failed to get container status \"41181a726270c709cb862a71f0f599e4359c2d2cd1a4b470a89902360bc5cf4e\": rpc error: code = NotFound desc = could not find container \"41181a726270c709cb862a71f0f599e4359c2d2cd1a4b470a89902360bc5cf4e\": container with ID starting with 41181a726270c709cb862a71f0f599e4359c2d2cd1a4b470a89902360bc5cf4e not found: ID does not exist" Dec 13 06:59:58 crc kubenswrapper[5048]: I1213 06:59:58.185605 5048 scope.go:117] "RemoveContainer" containerID="d2f5796484f9da2329169b1fd0b26d104e18e06271fd3251b6cd55d3fbc80790" Dec 13 06:59:58 crc kubenswrapper[5048]: E1213 06:59:58.185939 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d2f5796484f9da2329169b1fd0b26d104e18e06271fd3251b6cd55d3fbc80790\": container with ID starting with d2f5796484f9da2329169b1fd0b26d104e18e06271fd3251b6cd55d3fbc80790 not found: ID does not exist" containerID="d2f5796484f9da2329169b1fd0b26d104e18e06271fd3251b6cd55d3fbc80790" Dec 13 06:59:58 crc kubenswrapper[5048]: I1213 06:59:58.185976 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d2f5796484f9da2329169b1fd0b26d104e18e06271fd3251b6cd55d3fbc80790"} err="failed to get container status \"d2f5796484f9da2329169b1fd0b26d104e18e06271fd3251b6cd55d3fbc80790\": rpc error: code = NotFound desc = could not find container 
\"d2f5796484f9da2329169b1fd0b26d104e18e06271fd3251b6cd55d3fbc80790\": container with ID starting with d2f5796484f9da2329169b1fd0b26d104e18e06271fd3251b6cd55d3fbc80790 not found: ID does not exist" Dec 13 06:59:58 crc kubenswrapper[5048]: I1213 06:59:58.262790 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j7688\" (UniqueName: \"kubernetes.io/projected/7df2d78c-502d-4d7c-9233-cf01992cab77-kube-api-access-j7688\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-vhsrc\" (UID: \"7df2d78c-502d-4d7c-9233-cf01992cab77\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vhsrc" Dec 13 06:59:58 crc kubenswrapper[5048]: I1213 06:59:58.262951 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7df2d78c-502d-4d7c-9233-cf01992cab77-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-vhsrc\" (UID: \"7df2d78c-502d-4d7c-9233-cf01992cab77\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vhsrc" Dec 13 06:59:58 crc kubenswrapper[5048]: I1213 06:59:58.263010 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7df2d78c-502d-4d7c-9233-cf01992cab77-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-vhsrc\" (UID: \"7df2d78c-502d-4d7c-9233-cf01992cab77\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vhsrc" Dec 13 06:59:58 crc kubenswrapper[5048]: I1213 06:59:58.267613 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7df2d78c-502d-4d7c-9233-cf01992cab77-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-vhsrc\" (UID: \"7df2d78c-502d-4d7c-9233-cf01992cab77\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vhsrc" Dec 13 06:59:58 crc kubenswrapper[5048]: I1213 06:59:58.267619 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7df2d78c-502d-4d7c-9233-cf01992cab77-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-vhsrc\" (UID: \"7df2d78c-502d-4d7c-9233-cf01992cab77\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vhsrc" Dec 13 06:59:58 crc kubenswrapper[5048]: I1213 06:59:58.282065 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j7688\" (UniqueName: \"kubernetes.io/projected/7df2d78c-502d-4d7c-9233-cf01992cab77-kube-api-access-j7688\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-vhsrc\" (UID: \"7df2d78c-502d-4d7c-9233-cf01992cab77\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vhsrc" Dec 13 06:59:58 crc kubenswrapper[5048]: I1213 06:59:58.504354 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vhsrc" Dec 13 06:59:58 crc kubenswrapper[5048]: I1213 06:59:58.579044 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="645f2702-d44d-48c7-93e6-422b82cc3eb3" path="/var/lib/kubelet/pods/645f2702-d44d-48c7-93e6-422b82cc3eb3/volumes" Dec 13 06:59:59 crc kubenswrapper[5048]: I1213 06:59:59.143767 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vhsrc"] Dec 13 06:59:59 crc kubenswrapper[5048]: I1213 06:59:59.254977 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-bpcbd"] Dec 13 06:59:59 crc kubenswrapper[5048]: I1213 06:59:59.255251 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-bpcbd" podUID="f03b25c0-0fb6-42b4-84d9-2fdf4498170e" containerName="registry-server" containerID="cri-o://8dde5af7779d318c2382a16f326be48c08ceff5ede7a8303c38c1451d16ae093" gracePeriod=2 Dec 13 06:59:59 crc kubenswrapper[5048]: I1213 06:59:59.668782 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bpcbd" Dec 13 06:59:59 crc kubenswrapper[5048]: I1213 06:59:59.760933 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mhv8t\" (UniqueName: \"kubernetes.io/projected/f03b25c0-0fb6-42b4-84d9-2fdf4498170e-kube-api-access-mhv8t\") pod \"f03b25c0-0fb6-42b4-84d9-2fdf4498170e\" (UID: \"f03b25c0-0fb6-42b4-84d9-2fdf4498170e\") " Dec 13 06:59:59 crc kubenswrapper[5048]: I1213 06:59:59.761086 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f03b25c0-0fb6-42b4-84d9-2fdf4498170e-catalog-content\") pod \"f03b25c0-0fb6-42b4-84d9-2fdf4498170e\" (UID: \"f03b25c0-0fb6-42b4-84d9-2fdf4498170e\") " Dec 13 06:59:59 crc kubenswrapper[5048]: I1213 06:59:59.761128 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f03b25c0-0fb6-42b4-84d9-2fdf4498170e-utilities\") pod \"f03b25c0-0fb6-42b4-84d9-2fdf4498170e\" (UID: \"f03b25c0-0fb6-42b4-84d9-2fdf4498170e\") " Dec 13 06:59:59 crc kubenswrapper[5048]: I1213 06:59:59.762649 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f03b25c0-0fb6-42b4-84d9-2fdf4498170e-utilities" (OuterVolumeSpecName: "utilities") pod "f03b25c0-0fb6-42b4-84d9-2fdf4498170e" (UID: "f03b25c0-0fb6-42b4-84d9-2fdf4498170e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:59:59 crc kubenswrapper[5048]: I1213 06:59:59.765180 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f03b25c0-0fb6-42b4-84d9-2fdf4498170e-kube-api-access-mhv8t" (OuterVolumeSpecName: "kube-api-access-mhv8t") pod "f03b25c0-0fb6-42b4-84d9-2fdf4498170e" (UID: "f03b25c0-0fb6-42b4-84d9-2fdf4498170e"). InnerVolumeSpecName "kube-api-access-mhv8t". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 06:59:59 crc kubenswrapper[5048]: I1213 06:59:59.815991 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f03b25c0-0fb6-42b4-84d9-2fdf4498170e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f03b25c0-0fb6-42b4-84d9-2fdf4498170e" (UID: "f03b25c0-0fb6-42b4-84d9-2fdf4498170e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 06:59:59 crc kubenswrapper[5048]: I1213 06:59:59.863662 5048 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f03b25c0-0fb6-42b4-84d9-2fdf4498170e-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 13 06:59:59 crc kubenswrapper[5048]: I1213 06:59:59.863691 5048 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f03b25c0-0fb6-42b4-84d9-2fdf4498170e-utilities\") on node \"crc\" DevicePath \"\"" Dec 13 06:59:59 crc kubenswrapper[5048]: I1213 06:59:59.863700 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mhv8t\" (UniqueName: \"kubernetes.io/projected/f03b25c0-0fb6-42b4-84d9-2fdf4498170e-kube-api-access-mhv8t\") on node \"crc\" DevicePath \"\"" Dec 13 06:59:59 crc kubenswrapper[5048]: I1213 06:59:59.977930 5048 generic.go:334] "Generic (PLEG): container finished" podID="f03b25c0-0fb6-42b4-84d9-2fdf4498170e" containerID="8dde5af7779d318c2382a16f326be48c08ceff5ede7a8303c38c1451d16ae093" exitCode=0 Dec 13 06:59:59 crc kubenswrapper[5048]: I1213 06:59:59.978007 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bpcbd" Dec 13 06:59:59 crc kubenswrapper[5048]: I1213 06:59:59.978050 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bpcbd" event={"ID":"f03b25c0-0fb6-42b4-84d9-2fdf4498170e","Type":"ContainerDied","Data":"8dde5af7779d318c2382a16f326be48c08ceff5ede7a8303c38c1451d16ae093"} Dec 13 06:59:59 crc kubenswrapper[5048]: I1213 06:59:59.978088 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bpcbd" event={"ID":"f03b25c0-0fb6-42b4-84d9-2fdf4498170e","Type":"ContainerDied","Data":"8e13cedac378b652257daf8ce2a427f6064ca73a2477110f35bc9411fa54de98"} Dec 13 06:59:59 crc kubenswrapper[5048]: I1213 06:59:59.978112 5048 scope.go:117] "RemoveContainer" containerID="8dde5af7779d318c2382a16f326be48c08ceff5ede7a8303c38c1451d16ae093" Dec 13 06:59:59 crc kubenswrapper[5048]: I1213 06:59:59.981114 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vhsrc" event={"ID":"7df2d78c-502d-4d7c-9233-cf01992cab77","Type":"ContainerStarted","Data":"18dedaca239702223d98fa712408a0120e2fd973414962d2e7c19809d9eb41e4"} Dec 13 06:59:59 crc kubenswrapper[5048]: I1213 06:59:59.981149 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vhsrc" event={"ID":"7df2d78c-502d-4d7c-9233-cf01992cab77","Type":"ContainerStarted","Data":"5724406cc79a2923f9463cc5298b1c7953eed320eca4d0e522deb7b572080ecc"} Dec 13 07:00:00 crc kubenswrapper[5048]: I1213 07:00:00.001520 5048 scope.go:117] "RemoveContainer" containerID="87e8dec7a48f06e6f90ff84797ecf65779d6d73578fa0c521934a4aceee4fcd6" Dec 13 07:00:00 crc kubenswrapper[5048]: I1213 07:00:00.016544 5048 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vhsrc" podStartSLOduration=1.51780795 podStartE2EDuration="2.016527499s" podCreationTimestamp="2025-12-13 06:59:58 +0000 UTC" firstStartedPulling="2025-12-13 06:59:59.154377487 +0000 UTC m=+1833.020972068" lastFinishedPulling="2025-12-13 06:59:59.653097036 +0000 UTC m=+1833.519691617" observedRunningTime="2025-12-13 07:00:00.00874525 +0000 UTC m=+1833.875339901" watchObservedRunningTime="2025-12-13 07:00:00.016527499 +0000 UTC m=+1833.883122070" Dec 13 07:00:00 crc kubenswrapper[5048]: I1213 07:00:00.030384 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-bpcbd"] Dec 13 07:00:00 crc kubenswrapper[5048]: I1213 07:00:00.037959 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-bpcbd"] Dec 13 07:00:00 crc kubenswrapper[5048]: I1213 07:00:00.055822 5048 scope.go:117] "RemoveContainer" containerID="4f2abdef929ec72e7d1b6e95fa0434bb81921826a40089210f49527acf6e850d" Dec 13 07:00:00 crc kubenswrapper[5048]: I1213 07:00:00.074368 5048 scope.go:117] "RemoveContainer" containerID="8dde5af7779d318c2382a16f326be48c08ceff5ede7a8303c38c1451d16ae093" Dec 13 07:00:00 crc kubenswrapper[5048]: E1213 07:00:00.074904 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8dde5af7779d318c2382a16f326be48c08ceff5ede7a8303c38c1451d16ae093\": container with ID starting with 8dde5af7779d318c2382a16f326be48c08ceff5ede7a8303c38c1451d16ae093 not found: ID does not exist" containerID="8dde5af7779d318c2382a16f326be48c08ceff5ede7a8303c38c1451d16ae093" Dec 13 07:00:00 crc kubenswrapper[5048]: I1213 07:00:00.074956 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8dde5af7779d318c2382a16f326be48c08ceff5ede7a8303c38c1451d16ae093"} err="failed to get container status \"8dde5af7779d318c2382a16f326be48c08ceff5ede7a8303c38c1451d16ae093\": rpc error: code = NotFound desc = could not find container \"8dde5af7779d318c2382a16f326be48c08ceff5ede7a8303c38c1451d16ae093\": container with ID starting with 8dde5af7779d318c2382a16f326be48c08ceff5ede7a8303c38c1451d16ae093 not found: ID does not exist" Dec 13 07:00:00 crc kubenswrapper[5048]: I1213 07:00:00.074990 5048 scope.go:117] "RemoveContainer" containerID="87e8dec7a48f06e6f90ff84797ecf65779d6d73578fa0c521934a4aceee4fcd6" Dec 13 07:00:00 crc kubenswrapper[5048]: E1213 07:00:00.075337 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"87e8dec7a48f06e6f90ff84797ecf65779d6d73578fa0c521934a4aceee4fcd6\": container with ID starting with 87e8dec7a48f06e6f90ff84797ecf65779d6d73578fa0c521934a4aceee4fcd6 not found: ID does not exist" containerID="87e8dec7a48f06e6f90ff84797ecf65779d6d73578fa0c521934a4aceee4fcd6" Dec 13 07:00:00 crc kubenswrapper[5048]: I1213 07:00:00.075366 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"87e8dec7a48f06e6f90ff84797ecf65779d6d73578fa0c521934a4aceee4fcd6"} err="failed to get container status \"87e8dec7a48f06e6f90ff84797ecf65779d6d73578fa0c521934a4aceee4fcd6\": rpc error: code = NotFound desc = could not find container \"87e8dec7a48f06e6f90ff84797ecf65779d6d73578fa0c521934a4aceee4fcd6\": container with ID starting with 
87e8dec7a48f06e6f90ff84797ecf65779d6d73578fa0c521934a4aceee4fcd6 not found: ID does not exist" Dec 13 07:00:00 crc kubenswrapper[5048]: I1213 07:00:00.075391 5048 scope.go:117] "RemoveContainer" containerID="4f2abdef929ec72e7d1b6e95fa0434bb81921826a40089210f49527acf6e850d" Dec 13 07:00:00 crc kubenswrapper[5048]: E1213 07:00:00.075726 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4f2abdef929ec72e7d1b6e95fa0434bb81921826a40089210f49527acf6e850d\": container with ID starting with 4f2abdef929ec72e7d1b6e95fa0434bb81921826a40089210f49527acf6e850d not found: ID does not exist" containerID="4f2abdef929ec72e7d1b6e95fa0434bb81921826a40089210f49527acf6e850d" Dec 13 07:00:00 crc kubenswrapper[5048]: I1213 07:00:00.075748 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f2abdef929ec72e7d1b6e95fa0434bb81921826a40089210f49527acf6e850d"} err="failed to get container status \"4f2abdef929ec72e7d1b6e95fa0434bb81921826a40089210f49527acf6e850d\": rpc error: code = NotFound desc = could not find container \"4f2abdef929ec72e7d1b6e95fa0434bb81921826a40089210f49527acf6e850d\": container with ID starting with 4f2abdef929ec72e7d1b6e95fa0434bb81921826a40089210f49527acf6e850d not found: ID does not exist" Dec 13 07:00:00 crc kubenswrapper[5048]: I1213 07:00:00.140867 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29426820-pghsg"] Dec 13 07:00:00 crc kubenswrapper[5048]: E1213 07:00:00.141335 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f03b25c0-0fb6-42b4-84d9-2fdf4498170e" containerName="extract-utilities" Dec 13 07:00:00 crc kubenswrapper[5048]: I1213 07:00:00.141357 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="f03b25c0-0fb6-42b4-84d9-2fdf4498170e" containerName="extract-utilities" Dec 13 07:00:00 crc kubenswrapper[5048]: E1213 07:00:00.141405 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f03b25c0-0fb6-42b4-84d9-2fdf4498170e" containerName="extract-content" Dec 13 07:00:00 crc kubenswrapper[5048]: I1213 07:00:00.141417 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="f03b25c0-0fb6-42b4-84d9-2fdf4498170e" containerName="extract-content" Dec 13 07:00:00 crc kubenswrapper[5048]: E1213 07:00:00.143531 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f03b25c0-0fb6-42b4-84d9-2fdf4498170e" containerName="registry-server" Dec 13 07:00:00 crc kubenswrapper[5048]: I1213 07:00:00.143605 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="f03b25c0-0fb6-42b4-84d9-2fdf4498170e" containerName="registry-server" Dec 13 07:00:00 crc kubenswrapper[5048]: I1213 07:00:00.144126 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="f03b25c0-0fb6-42b4-84d9-2fdf4498170e" containerName="registry-server" Dec 13 07:00:00 crc kubenswrapper[5048]: I1213 07:00:00.145071 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29426820-pghsg" Dec 13 07:00:00 crc kubenswrapper[5048]: I1213 07:00:00.148264 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 13 07:00:00 crc kubenswrapper[5048]: I1213 07:00:00.148275 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 13 07:00:00 crc kubenswrapper[5048]: I1213 07:00:00.151997 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29426820-pghsg"] Dec 13 07:00:00 crc kubenswrapper[5048]: I1213 07:00:00.271149 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7f5a1a5d-316c-4a98-80ab-3ee688ab0672-config-volume\") pod \"collect-profiles-29426820-pghsg\" (UID: \"7f5a1a5d-316c-4a98-80ab-3ee688ab0672\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426820-pghsg" Dec 13 07:00:00 crc kubenswrapper[5048]: I1213 07:00:00.271226 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7f5a1a5d-316c-4a98-80ab-3ee688ab0672-secret-volume\") pod \"collect-profiles-29426820-pghsg\" (UID: \"7f5a1a5d-316c-4a98-80ab-3ee688ab0672\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426820-pghsg" Dec 13 07:00:00 crc kubenswrapper[5048]: I1213 07:00:00.271584 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p8fs9\" (UniqueName: \"kubernetes.io/projected/7f5a1a5d-316c-4a98-80ab-3ee688ab0672-kube-api-access-p8fs9\") pod \"collect-profiles-29426820-pghsg\" (UID: \"7f5a1a5d-316c-4a98-80ab-3ee688ab0672\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426820-pghsg" Dec 13 07:00:00 crc kubenswrapper[5048]: I1213 07:00:00.373350 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p8fs9\" (UniqueName: \"kubernetes.io/projected/7f5a1a5d-316c-4a98-80ab-3ee688ab0672-kube-api-access-p8fs9\") pod \"collect-profiles-29426820-pghsg\" (UID: \"7f5a1a5d-316c-4a98-80ab-3ee688ab0672\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426820-pghsg" Dec 13 07:00:00 crc kubenswrapper[5048]: I1213 07:00:00.373448 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7f5a1a5d-316c-4a98-80ab-3ee688ab0672-config-volume\") pod \"collect-profiles-29426820-pghsg\" (UID: \"7f5a1a5d-316c-4a98-80ab-3ee688ab0672\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426820-pghsg" Dec 13 07:00:00 crc kubenswrapper[5048]: I1213 07:00:00.373493 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7f5a1a5d-316c-4a98-80ab-3ee688ab0672-secret-volume\") pod \"collect-profiles-29426820-pghsg\" (UID: \"7f5a1a5d-316c-4a98-80ab-3ee688ab0672\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426820-pghsg" Dec 13 07:00:00 crc kubenswrapper[5048]: I1213 07:00:00.374491 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7f5a1a5d-316c-4a98-80ab-3ee688ab0672-config-volume\") pod 
\"collect-profiles-29426820-pghsg\" (UID: \"7f5a1a5d-316c-4a98-80ab-3ee688ab0672\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426820-pghsg" Dec 13 07:00:00 crc kubenswrapper[5048]: I1213 07:00:00.378140 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7f5a1a5d-316c-4a98-80ab-3ee688ab0672-secret-volume\") pod \"collect-profiles-29426820-pghsg\" (UID: \"7f5a1a5d-316c-4a98-80ab-3ee688ab0672\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426820-pghsg" Dec 13 07:00:00 crc kubenswrapper[5048]: I1213 07:00:00.393119 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p8fs9\" (UniqueName: \"kubernetes.io/projected/7f5a1a5d-316c-4a98-80ab-3ee688ab0672-kube-api-access-p8fs9\") pod \"collect-profiles-29426820-pghsg\" (UID: \"7f5a1a5d-316c-4a98-80ab-3ee688ab0672\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426820-pghsg" Dec 13 07:00:00 crc kubenswrapper[5048]: I1213 07:00:00.493113 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29426820-pghsg" Dec 13 07:00:00 crc kubenswrapper[5048]: I1213 07:00:00.579148 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f03b25c0-0fb6-42b4-84d9-2fdf4498170e" path="/var/lib/kubelet/pods/f03b25c0-0fb6-42b4-84d9-2fdf4498170e/volumes" Dec 13 07:00:00 crc kubenswrapper[5048]: I1213 07:00:00.944134 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29426820-pghsg"] Dec 13 07:00:00 crc kubenswrapper[5048]: W1213 07:00:00.946241 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7f5a1a5d_316c_4a98_80ab_3ee688ab0672.slice/crio-8ea6c6aa744a9835e2aa6a252d3891344e5f0846a1cc8b614b747c9f0de2ad8e WatchSource:0}: Error finding container 8ea6c6aa744a9835e2aa6a252d3891344e5f0846a1cc8b614b747c9f0de2ad8e: Status 404 returned error can't find the container with id 8ea6c6aa744a9835e2aa6a252d3891344e5f0846a1cc8b614b747c9f0de2ad8e Dec 13 07:00:00 crc kubenswrapper[5048]: I1213 07:00:00.991130 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29426820-pghsg" event={"ID":"7f5a1a5d-316c-4a98-80ab-3ee688ab0672","Type":"ContainerStarted","Data":"8ea6c6aa744a9835e2aa6a252d3891344e5f0846a1cc8b614b747c9f0de2ad8e"} Dec 13 07:00:02 crc kubenswrapper[5048]: I1213 07:00:02.001090 5048 generic.go:334] "Generic (PLEG): container finished" podID="7f5a1a5d-316c-4a98-80ab-3ee688ab0672" containerID="ef3d98b8906b0a293e0abe9c9bb145ae32ed4b80a4516b5d5010ba3e7427f4db" exitCode=0 Dec 13 07:00:02 crc kubenswrapper[5048]: I1213 07:00:02.001313 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29426820-pghsg" event={"ID":"7f5a1a5d-316c-4a98-80ab-3ee688ab0672","Type":"ContainerDied","Data":"ef3d98b8906b0a293e0abe9c9bb145ae32ed4b80a4516b5d5010ba3e7427f4db"} Dec 13 07:00:03 crc kubenswrapper[5048]: I1213 07:00:03.363185 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29426820-pghsg" Dec 13 07:00:03 crc kubenswrapper[5048]: I1213 07:00:03.529750 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7f5a1a5d-316c-4a98-80ab-3ee688ab0672-config-volume\") pod \"7f5a1a5d-316c-4a98-80ab-3ee688ab0672\" (UID: \"7f5a1a5d-316c-4a98-80ab-3ee688ab0672\") " Dec 13 07:00:03 crc kubenswrapper[5048]: I1213 07:00:03.529895 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p8fs9\" (UniqueName: \"kubernetes.io/projected/7f5a1a5d-316c-4a98-80ab-3ee688ab0672-kube-api-access-p8fs9\") pod \"7f5a1a5d-316c-4a98-80ab-3ee688ab0672\" (UID: \"7f5a1a5d-316c-4a98-80ab-3ee688ab0672\") " Dec 13 07:00:03 crc kubenswrapper[5048]: I1213 07:00:03.529945 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7f5a1a5d-316c-4a98-80ab-3ee688ab0672-secret-volume\") pod \"7f5a1a5d-316c-4a98-80ab-3ee688ab0672\" (UID: \"7f5a1a5d-316c-4a98-80ab-3ee688ab0672\") " Dec 13 07:00:03 crc kubenswrapper[5048]: I1213 07:00:03.531230 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7f5a1a5d-316c-4a98-80ab-3ee688ab0672-config-volume" (OuterVolumeSpecName: "config-volume") pod "7f5a1a5d-316c-4a98-80ab-3ee688ab0672" (UID: "7f5a1a5d-316c-4a98-80ab-3ee688ab0672"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 07:00:03 crc kubenswrapper[5048]: I1213 07:00:03.535861 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f5a1a5d-316c-4a98-80ab-3ee688ab0672-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "7f5a1a5d-316c-4a98-80ab-3ee688ab0672" (UID: "7f5a1a5d-316c-4a98-80ab-3ee688ab0672"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:00:03 crc kubenswrapper[5048]: I1213 07:00:03.549819 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f5a1a5d-316c-4a98-80ab-3ee688ab0672-kube-api-access-p8fs9" (OuterVolumeSpecName: "kube-api-access-p8fs9") pod "7f5a1a5d-316c-4a98-80ab-3ee688ab0672" (UID: "7f5a1a5d-316c-4a98-80ab-3ee688ab0672"). InnerVolumeSpecName "kube-api-access-p8fs9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 07:00:03 crc kubenswrapper[5048]: I1213 07:00:03.632284 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p8fs9\" (UniqueName: \"kubernetes.io/projected/7f5a1a5d-316c-4a98-80ab-3ee688ab0672-kube-api-access-p8fs9\") on node \"crc\" DevicePath \"\"" Dec 13 07:00:03 crc kubenswrapper[5048]: I1213 07:00:03.632344 5048 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7f5a1a5d-316c-4a98-80ab-3ee688ab0672-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 13 07:00:03 crc kubenswrapper[5048]: I1213 07:00:03.632359 5048 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7f5a1a5d-316c-4a98-80ab-3ee688ab0672-config-volume\") on node \"crc\" DevicePath \"\"" Dec 13 07:00:04 crc kubenswrapper[5048]: I1213 07:00:04.021507 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29426820-pghsg" event={"ID":"7f5a1a5d-316c-4a98-80ab-3ee688ab0672","Type":"ContainerDied","Data":"8ea6c6aa744a9835e2aa6a252d3891344e5f0846a1cc8b614b747c9f0de2ad8e"} Dec 13 07:00:04 crc kubenswrapper[5048]: I1213 07:00:04.021868 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8ea6c6aa744a9835e2aa6a252d3891344e5f0846a1cc8b614b747c9f0de2ad8e" Dec 13 07:00:04 crc kubenswrapper[5048]: I1213 07:00:04.021927 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29426820-pghsg" Dec 13 07:00:04 crc kubenswrapper[5048]: I1213 07:00:04.047599 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-jzn5h"] Dec 13 07:00:04 crc kubenswrapper[5048]: I1213 07:00:04.064190 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-jzn5h"] Dec 13 07:00:04 crc kubenswrapper[5048]: I1213 07:00:04.580530 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2d92d376-9d6c-404a-b1f5-a13a67276b6f" path="/var/lib/kubelet/pods/2d92d376-9d6c-404a-b1f5-a13a67276b6f/volumes" Dec 13 07:00:05 crc kubenswrapper[5048]: I1213 07:00:05.567324 5048 scope.go:117] "RemoveContainer" containerID="c3c3c010eb86b49ee1ff9e8da0dddc32f266e163f2ef609d402b38792e3c4862" Dec 13 07:00:05 crc kubenswrapper[5048]: E1213 07:00:05.568196 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:00:08 crc kubenswrapper[5048]: I1213 07:00:08.044235 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-pzgrc"] Dec 13 07:00:08 crc kubenswrapper[5048]: I1213 07:00:08.054167 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-pzgrc"] Dec 13 07:00:08 crc kubenswrapper[5048]: I1213 07:00:08.579174 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af6830b6-96f2-487f-ba02-93c7f01d0ceb" path="/var/lib/kubelet/pods/af6830b6-96f2-487f-ba02-93c7f01d0ceb/volumes" Dec 13 07:00:12 crc kubenswrapper[5048]: I1213 07:00:12.027600 5048 kubelet.go:2437] "SyncLoop 
DELETE" source="api" pods=["openstack/cinder-db-sync-9xjhb"] Dec 13 07:00:12 crc kubenswrapper[5048]: I1213 07:00:12.037406 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-9xjhb"] Dec 13 07:00:12 crc kubenswrapper[5048]: I1213 07:00:12.576768 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a4a2da1-cc7f-474f-baf7-16c352bd0708" path="/var/lib/kubelet/pods/8a4a2da1-cc7f-474f-baf7-16c352bd0708/volumes" Dec 13 07:00:16 crc kubenswrapper[5048]: I1213 07:00:16.574728 5048 scope.go:117] "RemoveContainer" containerID="c3c3c010eb86b49ee1ff9e8da0dddc32f266e163f2ef609d402b38792e3c4862" Dec 13 07:00:16 crc kubenswrapper[5048]: E1213 07:00:16.577277 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:00:28 crc kubenswrapper[5048]: I1213 07:00:28.569423 5048 scope.go:117] "RemoveContainer" containerID="c3c3c010eb86b49ee1ff9e8da0dddc32f266e163f2ef609d402b38792e3c4862" Dec 13 07:00:28 crc kubenswrapper[5048]: E1213 07:00:28.570321 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:00:40 crc kubenswrapper[5048]: I1213 07:00:40.567657 5048 scope.go:117] "RemoveContainer" containerID="c3c3c010eb86b49ee1ff9e8da0dddc32f266e163f2ef609d402b38792e3c4862" Dec 13 07:00:40 crc kubenswrapper[5048]: E1213 07:00:40.568610 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:00:52 crc kubenswrapper[5048]: I1213 07:00:52.868656 5048 scope.go:117] "RemoveContainer" containerID="696cec693d739a77d0fa2773a7c4e66455dc16b82749b6448d55f0d5b8782b68" Dec 13 07:00:52 crc kubenswrapper[5048]: I1213 07:00:52.925810 5048 scope.go:117] "RemoveContainer" containerID="740fb7bacfa2a3b3b6ad58b4b499b2ec58dc979e363111b4788f11f11437cdd0" Dec 13 07:00:52 crc kubenswrapper[5048]: I1213 07:00:52.979558 5048 scope.go:117] "RemoveContainer" containerID="4e9ffef4eb8dff919102ef5693783b933d9927705c70a2fa4abdd604bc7a3c0f" Dec 13 07:00:53 crc kubenswrapper[5048]: I1213 07:00:53.020496 5048 scope.go:117] "RemoveContainer" containerID="471f7b0313c94298fd2e33c2b09c7fe2912c91488d77ebd1dcd62288fc6dc5f4" Dec 13 07:00:54 crc kubenswrapper[5048]: I1213 07:00:54.566788 5048 scope.go:117] "RemoveContainer" containerID="c3c3c010eb86b49ee1ff9e8da0dddc32f266e163f2ef609d402b38792e3c4862" Dec 13 07:00:54 crc kubenswrapper[5048]: E1213 07:00:54.567551 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:00:57 crc kubenswrapper[5048]: I1213 07:00:57.046528 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-ws26t"] Dec 13 07:00:57 crc kubenswrapper[5048]: I1213 07:00:57.055500 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-cn4pn"] Dec 13 07:00:57 crc kubenswrapper[5048]: I1213 07:00:57.062971 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-ws26t"] Dec 13 07:00:57 crc kubenswrapper[5048]: I1213 07:00:57.071118 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-cn4pn"] Dec 13 07:00:58 crc kubenswrapper[5048]: I1213 07:00:58.055804 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-7adf-account-create-update-52mfj"] Dec 13 07:00:58 crc kubenswrapper[5048]: I1213 07:00:58.071183 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-f16a-account-create-update-kwscr"] Dec 13 07:00:58 crc kubenswrapper[5048]: I1213 07:00:58.080512 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-7adf-account-create-update-52mfj"] Dec 13 07:00:58 crc kubenswrapper[5048]: I1213 07:00:58.087704 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-jnf7w"] Dec 13 07:00:58 crc kubenswrapper[5048]: I1213 07:00:58.095056 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-081b-account-create-update-lvp4w"] Dec 13 07:00:58 crc kubenswrapper[5048]: I1213 07:00:58.102472 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-jnf7w"] Dec 13 07:00:58 crc kubenswrapper[5048]: I1213 07:00:58.109780 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-f16a-account-create-update-kwscr"] Dec 13 07:00:58 crc kubenswrapper[5048]: I1213 07:00:58.117039 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-081b-account-create-update-lvp4w"] Dec 13 07:00:58 crc kubenswrapper[5048]: I1213 07:00:58.581413 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="030ae47b-71e7-4fb3-b518-f807e1fda0a1" path="/var/lib/kubelet/pods/030ae47b-71e7-4fb3-b518-f807e1fda0a1/volumes" Dec 13 07:00:58 crc kubenswrapper[5048]: I1213 07:00:58.582335 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2cbf8637-a57a-4dd4-a175-6ab0886adbc2" path="/var/lib/kubelet/pods/2cbf8637-a57a-4dd4-a175-6ab0886adbc2/volumes" Dec 13 07:00:58 crc kubenswrapper[5048]: I1213 07:00:58.582997 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55848283-b878-4f02-b76f-aaf4ce0d6765" path="/var/lib/kubelet/pods/55848283-b878-4f02-b76f-aaf4ce0d6765/volumes" Dec 13 07:00:58 crc kubenswrapper[5048]: I1213 07:00:58.583657 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ebfb097-2554-4605-bf3c-a545907fbaa6" path="/var/lib/kubelet/pods/8ebfb097-2554-4605-bf3c-a545907fbaa6/volumes" Dec 13 07:00:58 crc kubenswrapper[5048]: I1213 07:00:58.585022 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c250e8c3-7775-44f8-8544-51e69bdaff07" 
path="/var/lib/kubelet/pods/c250e8c3-7775-44f8-8544-51e69bdaff07/volumes" Dec 13 07:00:58 crc kubenswrapper[5048]: I1213 07:00:58.585946 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d58379fb-a3bb-4853-995d-2df64fa912d4" path="/var/lib/kubelet/pods/d58379fb-a3bb-4853-995d-2df64fa912d4/volumes" Dec 13 07:01:00 crc kubenswrapper[5048]: I1213 07:01:00.169311 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29426821-gd94t"] Dec 13 07:01:00 crc kubenswrapper[5048]: E1213 07:01:00.170162 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f5a1a5d-316c-4a98-80ab-3ee688ab0672" containerName="collect-profiles" Dec 13 07:01:00 crc kubenswrapper[5048]: I1213 07:01:00.170183 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f5a1a5d-316c-4a98-80ab-3ee688ab0672" containerName="collect-profiles" Dec 13 07:01:00 crc kubenswrapper[5048]: I1213 07:01:00.170406 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f5a1a5d-316c-4a98-80ab-3ee688ab0672" containerName="collect-profiles" Dec 13 07:01:00 crc kubenswrapper[5048]: I1213 07:01:00.171193 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29426821-gd94t" Dec 13 07:01:00 crc kubenswrapper[5048]: I1213 07:01:00.193592 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29426821-gd94t"] Dec 13 07:01:00 crc kubenswrapper[5048]: I1213 07:01:00.303334 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nsfgm\" (UniqueName: \"kubernetes.io/projected/6748fa72-aa27-4717-9434-2a06950c519a-kube-api-access-nsfgm\") pod \"keystone-cron-29426821-gd94t\" (UID: \"6748fa72-aa27-4717-9434-2a06950c519a\") " pod="openstack/keystone-cron-29426821-gd94t" Dec 13 07:01:00 crc kubenswrapper[5048]: I1213 07:01:00.303379 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6748fa72-aa27-4717-9434-2a06950c519a-combined-ca-bundle\") pod \"keystone-cron-29426821-gd94t\" (UID: \"6748fa72-aa27-4717-9434-2a06950c519a\") " pod="openstack/keystone-cron-29426821-gd94t" Dec 13 07:01:00 crc kubenswrapper[5048]: I1213 07:01:00.303416 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6748fa72-aa27-4717-9434-2a06950c519a-config-data\") pod \"keystone-cron-29426821-gd94t\" (UID: \"6748fa72-aa27-4717-9434-2a06950c519a\") " pod="openstack/keystone-cron-29426821-gd94t" Dec 13 07:01:00 crc kubenswrapper[5048]: I1213 07:01:00.303521 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6748fa72-aa27-4717-9434-2a06950c519a-fernet-keys\") pod \"keystone-cron-29426821-gd94t\" (UID: \"6748fa72-aa27-4717-9434-2a06950c519a\") " pod="openstack/keystone-cron-29426821-gd94t" Dec 13 07:01:00 crc kubenswrapper[5048]: I1213 07:01:00.405759 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nsfgm\" (UniqueName: \"kubernetes.io/projected/6748fa72-aa27-4717-9434-2a06950c519a-kube-api-access-nsfgm\") pod \"keystone-cron-29426821-gd94t\" (UID: \"6748fa72-aa27-4717-9434-2a06950c519a\") " pod="openstack/keystone-cron-29426821-gd94t" Dec 13 07:01:00 crc kubenswrapper[5048]: I1213 07:01:00.405813 5048 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6748fa72-aa27-4717-9434-2a06950c519a-combined-ca-bundle\") pod \"keystone-cron-29426821-gd94t\" (UID: \"6748fa72-aa27-4717-9434-2a06950c519a\") " pod="openstack/keystone-cron-29426821-gd94t" Dec 13 07:01:00 crc kubenswrapper[5048]: I1213 07:01:00.405859 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6748fa72-aa27-4717-9434-2a06950c519a-config-data\") pod \"keystone-cron-29426821-gd94t\" (UID: \"6748fa72-aa27-4717-9434-2a06950c519a\") " pod="openstack/keystone-cron-29426821-gd94t" Dec 13 07:01:00 crc kubenswrapper[5048]: I1213 07:01:00.405972 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6748fa72-aa27-4717-9434-2a06950c519a-fernet-keys\") pod \"keystone-cron-29426821-gd94t\" (UID: \"6748fa72-aa27-4717-9434-2a06950c519a\") " pod="openstack/keystone-cron-29426821-gd94t" Dec 13 07:01:00 crc kubenswrapper[5048]: I1213 07:01:00.416952 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6748fa72-aa27-4717-9434-2a06950c519a-fernet-keys\") pod \"keystone-cron-29426821-gd94t\" (UID: \"6748fa72-aa27-4717-9434-2a06950c519a\") " pod="openstack/keystone-cron-29426821-gd94t" Dec 13 07:01:00 crc kubenswrapper[5048]: I1213 07:01:00.417154 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6748fa72-aa27-4717-9434-2a06950c519a-combined-ca-bundle\") pod \"keystone-cron-29426821-gd94t\" (UID: \"6748fa72-aa27-4717-9434-2a06950c519a\") " pod="openstack/keystone-cron-29426821-gd94t" Dec 13 07:01:00 crc kubenswrapper[5048]: I1213 07:01:00.417276 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6748fa72-aa27-4717-9434-2a06950c519a-config-data\") pod \"keystone-cron-29426821-gd94t\" (UID: \"6748fa72-aa27-4717-9434-2a06950c519a\") " pod="openstack/keystone-cron-29426821-gd94t" Dec 13 07:01:00 crc kubenswrapper[5048]: I1213 07:01:00.436417 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nsfgm\" (UniqueName: \"kubernetes.io/projected/6748fa72-aa27-4717-9434-2a06950c519a-kube-api-access-nsfgm\") pod \"keystone-cron-29426821-gd94t\" (UID: \"6748fa72-aa27-4717-9434-2a06950c519a\") " pod="openstack/keystone-cron-29426821-gd94t" Dec 13 07:01:00 crc kubenswrapper[5048]: I1213 07:01:00.501723 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29426821-gd94t" Dec 13 07:01:00 crc kubenswrapper[5048]: I1213 07:01:00.956052 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29426821-gd94t"] Dec 13 07:01:00 crc kubenswrapper[5048]: W1213 07:01:00.969753 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6748fa72_aa27_4717_9434_2a06950c519a.slice/crio-68f15b4d4a2e6e23314beb5a4b703787238f2dc49a1a43b31e3046ea15246442 WatchSource:0}: Error finding container 68f15b4d4a2e6e23314beb5a4b703787238f2dc49a1a43b31e3046ea15246442: Status 404 returned error can't find the container with id 68f15b4d4a2e6e23314beb5a4b703787238f2dc49a1a43b31e3046ea15246442 Dec 13 07:01:01 crc kubenswrapper[5048]: I1213 07:01:01.579974 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29426821-gd94t" event={"ID":"6748fa72-aa27-4717-9434-2a06950c519a","Type":"ContainerStarted","Data":"8a87f1fb8d62238fc60c48c1afe0140c3d09df98fdd78346681b95ce6e7ce5c1"} Dec 13 07:01:01 crc kubenswrapper[5048]: I1213 07:01:01.580244 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29426821-gd94t" event={"ID":"6748fa72-aa27-4717-9434-2a06950c519a","Type":"ContainerStarted","Data":"68f15b4d4a2e6e23314beb5a4b703787238f2dc49a1a43b31e3046ea15246442"} Dec 13 07:01:01 crc kubenswrapper[5048]: I1213 07:01:01.599205 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29426821-gd94t" podStartSLOduration=1.599182731 podStartE2EDuration="1.599182731s" podCreationTimestamp="2025-12-13 07:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 07:01:01.594140735 +0000 UTC m=+1895.460735326" watchObservedRunningTime="2025-12-13 07:01:01.599182731 +0000 UTC m=+1895.465777322" Dec 13 07:01:03 crc kubenswrapper[5048]: I1213 07:01:03.597703 5048 generic.go:334] "Generic (PLEG): container finished" podID="6748fa72-aa27-4717-9434-2a06950c519a" containerID="8a87f1fb8d62238fc60c48c1afe0140c3d09df98fdd78346681b95ce6e7ce5c1" exitCode=0 Dec 13 07:01:03 crc kubenswrapper[5048]: I1213 07:01:03.597771 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29426821-gd94t" event={"ID":"6748fa72-aa27-4717-9434-2a06950c519a","Type":"ContainerDied","Data":"8a87f1fb8d62238fc60c48c1afe0140c3d09df98fdd78346681b95ce6e7ce5c1"} Dec 13 07:01:04 crc kubenswrapper[5048]: I1213 07:01:04.947542 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29426821-gd94t" Dec 13 07:01:05 crc kubenswrapper[5048]: I1213 07:01:05.093270 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6748fa72-aa27-4717-9434-2a06950c519a-config-data\") pod \"6748fa72-aa27-4717-9434-2a06950c519a\" (UID: \"6748fa72-aa27-4717-9434-2a06950c519a\") " Dec 13 07:01:05 crc kubenswrapper[5048]: I1213 07:01:05.093676 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nsfgm\" (UniqueName: \"kubernetes.io/projected/6748fa72-aa27-4717-9434-2a06950c519a-kube-api-access-nsfgm\") pod \"6748fa72-aa27-4717-9434-2a06950c519a\" (UID: \"6748fa72-aa27-4717-9434-2a06950c519a\") " Dec 13 07:01:05 crc kubenswrapper[5048]: I1213 07:01:05.093814 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6748fa72-aa27-4717-9434-2a06950c519a-fernet-keys\") pod \"6748fa72-aa27-4717-9434-2a06950c519a\" (UID: \"6748fa72-aa27-4717-9434-2a06950c519a\") " Dec 13 07:01:05 crc kubenswrapper[5048]: I1213 07:01:05.093870 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6748fa72-aa27-4717-9434-2a06950c519a-combined-ca-bundle\") pod \"6748fa72-aa27-4717-9434-2a06950c519a\" (UID: \"6748fa72-aa27-4717-9434-2a06950c519a\") " Dec 13 07:01:05 crc kubenswrapper[5048]: I1213 07:01:05.099008 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6748fa72-aa27-4717-9434-2a06950c519a-kube-api-access-nsfgm" (OuterVolumeSpecName: "kube-api-access-nsfgm") pod "6748fa72-aa27-4717-9434-2a06950c519a" (UID: "6748fa72-aa27-4717-9434-2a06950c519a"). InnerVolumeSpecName "kube-api-access-nsfgm". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 07:01:05 crc kubenswrapper[5048]: I1213 07:01:05.099954 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6748fa72-aa27-4717-9434-2a06950c519a-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "6748fa72-aa27-4717-9434-2a06950c519a" (UID: "6748fa72-aa27-4717-9434-2a06950c519a"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:01:05 crc kubenswrapper[5048]: I1213 07:01:05.133210 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6748fa72-aa27-4717-9434-2a06950c519a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6748fa72-aa27-4717-9434-2a06950c519a" (UID: "6748fa72-aa27-4717-9434-2a06950c519a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:01:05 crc kubenswrapper[5048]: I1213 07:01:05.150648 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6748fa72-aa27-4717-9434-2a06950c519a-config-data" (OuterVolumeSpecName: "config-data") pod "6748fa72-aa27-4717-9434-2a06950c519a" (UID: "6748fa72-aa27-4717-9434-2a06950c519a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:01:05 crc kubenswrapper[5048]: I1213 07:01:05.196344 5048 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6748fa72-aa27-4717-9434-2a06950c519a-fernet-keys\") on node \"crc\" DevicePath \"\"" Dec 13 07:01:05 crc kubenswrapper[5048]: I1213 07:01:05.196379 5048 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6748fa72-aa27-4717-9434-2a06950c519a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 07:01:05 crc kubenswrapper[5048]: I1213 07:01:05.196393 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6748fa72-aa27-4717-9434-2a06950c519a-config-data\") on node \"crc\" DevicePath \"\"" Dec 13 07:01:05 crc kubenswrapper[5048]: I1213 07:01:05.196403 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nsfgm\" (UniqueName: \"kubernetes.io/projected/6748fa72-aa27-4717-9434-2a06950c519a-kube-api-access-nsfgm\") on node \"crc\" DevicePath \"\"" Dec 13 07:01:05 crc kubenswrapper[5048]: I1213 07:01:05.566843 5048 scope.go:117] "RemoveContainer" containerID="c3c3c010eb86b49ee1ff9e8da0dddc32f266e163f2ef609d402b38792e3c4862" Dec 13 07:01:05 crc kubenswrapper[5048]: E1213 07:01:05.567052 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:01:05 crc kubenswrapper[5048]: I1213 07:01:05.616102 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29426821-gd94t" event={"ID":"6748fa72-aa27-4717-9434-2a06950c519a","Type":"ContainerDied","Data":"68f15b4d4a2e6e23314beb5a4b703787238f2dc49a1a43b31e3046ea15246442"} Dec 13 07:01:05 crc kubenswrapper[5048]: I1213 07:01:05.616140 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="68f15b4d4a2e6e23314beb5a4b703787238f2dc49a1a43b31e3046ea15246442" Dec 13 07:01:05 crc kubenswrapper[5048]: I1213 07:01:05.616163 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29426821-gd94t" Dec 13 07:01:12 crc kubenswrapper[5048]: I1213 07:01:12.681364 5048 generic.go:334] "Generic (PLEG): container finished" podID="7df2d78c-502d-4d7c-9233-cf01992cab77" containerID="18dedaca239702223d98fa712408a0120e2fd973414962d2e7c19809d9eb41e4" exitCode=0 Dec 13 07:01:12 crc kubenswrapper[5048]: I1213 07:01:12.681462 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vhsrc" event={"ID":"7df2d78c-502d-4d7c-9233-cf01992cab77","Type":"ContainerDied","Data":"18dedaca239702223d98fa712408a0120e2fd973414962d2e7c19809d9eb41e4"} Dec 13 07:01:14 crc kubenswrapper[5048]: I1213 07:01:14.114659 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vhsrc" Dec 13 07:01:14 crc kubenswrapper[5048]: I1213 07:01:14.263261 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7df2d78c-502d-4d7c-9233-cf01992cab77-inventory\") pod \"7df2d78c-502d-4d7c-9233-cf01992cab77\" (UID: \"7df2d78c-502d-4d7c-9233-cf01992cab77\") " Dec 13 07:01:14 crc kubenswrapper[5048]: I1213 07:01:14.263517 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7df2d78c-502d-4d7c-9233-cf01992cab77-ssh-key\") pod \"7df2d78c-502d-4d7c-9233-cf01992cab77\" (UID: \"7df2d78c-502d-4d7c-9233-cf01992cab77\") " Dec 13 07:01:14 crc kubenswrapper[5048]: I1213 07:01:14.263620 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j7688\" (UniqueName: \"kubernetes.io/projected/7df2d78c-502d-4d7c-9233-cf01992cab77-kube-api-access-j7688\") pod \"7df2d78c-502d-4d7c-9233-cf01992cab77\" (UID: \"7df2d78c-502d-4d7c-9233-cf01992cab77\") " Dec 13 07:01:14 crc kubenswrapper[5048]: I1213 07:01:14.273252 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7df2d78c-502d-4d7c-9233-cf01992cab77-kube-api-access-j7688" (OuterVolumeSpecName: "kube-api-access-j7688") pod "7df2d78c-502d-4d7c-9233-cf01992cab77" (UID: "7df2d78c-502d-4d7c-9233-cf01992cab77"). InnerVolumeSpecName "kube-api-access-j7688". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 07:01:14 crc kubenswrapper[5048]: I1213 07:01:14.299870 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7df2d78c-502d-4d7c-9233-cf01992cab77-inventory" (OuterVolumeSpecName: "inventory") pod "7df2d78c-502d-4d7c-9233-cf01992cab77" (UID: "7df2d78c-502d-4d7c-9233-cf01992cab77"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:01:14 crc kubenswrapper[5048]: I1213 07:01:14.301686 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7df2d78c-502d-4d7c-9233-cf01992cab77-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "7df2d78c-502d-4d7c-9233-cf01992cab77" (UID: "7df2d78c-502d-4d7c-9233-cf01992cab77"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:01:14 crc kubenswrapper[5048]: I1213 07:01:14.365814 5048 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7df2d78c-502d-4d7c-9233-cf01992cab77-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 13 07:01:14 crc kubenswrapper[5048]: I1213 07:01:14.365849 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j7688\" (UniqueName: \"kubernetes.io/projected/7df2d78c-502d-4d7c-9233-cf01992cab77-kube-api-access-j7688\") on node \"crc\" DevicePath \"\"" Dec 13 07:01:14 crc kubenswrapper[5048]: I1213 07:01:14.365861 5048 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7df2d78c-502d-4d7c-9233-cf01992cab77-inventory\") on node \"crc\" DevicePath \"\"" Dec 13 07:01:14 crc kubenswrapper[5048]: I1213 07:01:14.704659 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vhsrc" event={"ID":"7df2d78c-502d-4d7c-9233-cf01992cab77","Type":"ContainerDied","Data":"5724406cc79a2923f9463cc5298b1c7953eed320eca4d0e522deb7b572080ecc"} Dec 13 07:01:14 crc kubenswrapper[5048]: I1213 07:01:14.704707 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5724406cc79a2923f9463cc5298b1c7953eed320eca4d0e522deb7b572080ecc" Dec 13 07:01:14 crc kubenswrapper[5048]: I1213 07:01:14.704772 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vhsrc" Dec 13 07:01:14 crc kubenswrapper[5048]: I1213 07:01:14.794403 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pw7g5"] Dec 13 07:01:14 crc kubenswrapper[5048]: E1213 07:01:14.794757 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7df2d78c-502d-4d7c-9233-cf01992cab77" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Dec 13 07:01:14 crc kubenswrapper[5048]: I1213 07:01:14.794774 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="7df2d78c-502d-4d7c-9233-cf01992cab77" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Dec 13 07:01:14 crc kubenswrapper[5048]: E1213 07:01:14.794803 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6748fa72-aa27-4717-9434-2a06950c519a" containerName="keystone-cron" Dec 13 07:01:14 crc kubenswrapper[5048]: I1213 07:01:14.794810 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="6748fa72-aa27-4717-9434-2a06950c519a" containerName="keystone-cron" Dec 13 07:01:14 crc kubenswrapper[5048]: I1213 07:01:14.794973 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="7df2d78c-502d-4d7c-9233-cf01992cab77" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Dec 13 07:01:14 crc kubenswrapper[5048]: I1213 07:01:14.794993 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="6748fa72-aa27-4717-9434-2a06950c519a" containerName="keystone-cron" Dec 13 07:01:14 crc kubenswrapper[5048]: I1213 07:01:14.796530 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pw7g5" Dec 13 07:01:14 crc kubenswrapper[5048]: I1213 07:01:14.800176 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 13 07:01:14 crc kubenswrapper[5048]: I1213 07:01:14.800249 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 13 07:01:14 crc kubenswrapper[5048]: I1213 07:01:14.800677 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hgp7p" Dec 13 07:01:14 crc kubenswrapper[5048]: I1213 07:01:14.803494 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 13 07:01:14 crc kubenswrapper[5048]: I1213 07:01:14.819187 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pw7g5"] Dec 13 07:01:14 crc kubenswrapper[5048]: I1213 07:01:14.874060 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6a5d7f38-72b9-4092-a948-c775ce64d40c-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-pw7g5\" (UID: \"6a5d7f38-72b9-4092-a948-c775ce64d40c\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pw7g5" Dec 13 07:01:14 crc kubenswrapper[5048]: I1213 07:01:14.874128 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vc7q\" (UniqueName: \"kubernetes.io/projected/6a5d7f38-72b9-4092-a948-c775ce64d40c-kube-api-access-4vc7q\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-pw7g5\" (UID: \"6a5d7f38-72b9-4092-a948-c775ce64d40c\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pw7g5" Dec 13 07:01:14 crc kubenswrapper[5048]: I1213 07:01:14.874162 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6a5d7f38-72b9-4092-a948-c775ce64d40c-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-pw7g5\" (UID: \"6a5d7f38-72b9-4092-a948-c775ce64d40c\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pw7g5" Dec 13 07:01:14 crc kubenswrapper[5048]: I1213 07:01:14.975933 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vc7q\" (UniqueName: \"kubernetes.io/projected/6a5d7f38-72b9-4092-a948-c775ce64d40c-kube-api-access-4vc7q\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-pw7g5\" (UID: \"6a5d7f38-72b9-4092-a948-c775ce64d40c\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pw7g5" Dec 13 07:01:14 crc kubenswrapper[5048]: I1213 07:01:14.975989 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6a5d7f38-72b9-4092-a948-c775ce64d40c-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-pw7g5\" (UID: \"6a5d7f38-72b9-4092-a948-c775ce64d40c\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pw7g5" Dec 13 07:01:14 crc kubenswrapper[5048]: I1213 07:01:14.976011 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6a5d7f38-72b9-4092-a948-c775ce64d40c-ssh-key\") pod 
\"validate-network-edpm-deployment-openstack-edpm-ipam-pw7g5\" (UID: \"6a5d7f38-72b9-4092-a948-c775ce64d40c\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pw7g5" Dec 13 07:01:14 crc kubenswrapper[5048]: I1213 07:01:14.979962 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6a5d7f38-72b9-4092-a948-c775ce64d40c-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-pw7g5\" (UID: \"6a5d7f38-72b9-4092-a948-c775ce64d40c\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pw7g5" Dec 13 07:01:14 crc kubenswrapper[5048]: I1213 07:01:14.979960 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6a5d7f38-72b9-4092-a948-c775ce64d40c-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-pw7g5\" (UID: \"6a5d7f38-72b9-4092-a948-c775ce64d40c\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pw7g5" Dec 13 07:01:14 crc kubenswrapper[5048]: I1213 07:01:14.995748 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4vc7q\" (UniqueName: \"kubernetes.io/projected/6a5d7f38-72b9-4092-a948-c775ce64d40c-kube-api-access-4vc7q\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-pw7g5\" (UID: \"6a5d7f38-72b9-4092-a948-c775ce64d40c\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pw7g5" Dec 13 07:01:15 crc kubenswrapper[5048]: I1213 07:01:15.119332 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pw7g5" Dec 13 07:01:15 crc kubenswrapper[5048]: I1213 07:01:15.679099 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pw7g5"] Dec 13 07:01:15 crc kubenswrapper[5048]: I1213 07:01:15.714636 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pw7g5" event={"ID":"6a5d7f38-72b9-4092-a948-c775ce64d40c","Type":"ContainerStarted","Data":"e34ac91731f3e6259aef66bc424bce467a630e285e09f3edaf606a8dd948df76"} Dec 13 07:01:16 crc kubenswrapper[5048]: I1213 07:01:16.724407 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pw7g5" event={"ID":"6a5d7f38-72b9-4092-a948-c775ce64d40c","Type":"ContainerStarted","Data":"5dae3c4e93e612248f0e79b1407883c9d54ab7ac40e70c8fd3fe0819190afce5"} Dec 13 07:01:16 crc kubenswrapper[5048]: I1213 07:01:16.747219 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pw7g5" podStartSLOduration=2.178137694 podStartE2EDuration="2.747200729s" podCreationTimestamp="2025-12-13 07:01:14 +0000 UTC" firstStartedPulling="2025-12-13 07:01:15.689560066 +0000 UTC m=+1909.556154647" lastFinishedPulling="2025-12-13 07:01:16.258623101 +0000 UTC m=+1910.125217682" observedRunningTime="2025-12-13 07:01:16.739961814 +0000 UTC m=+1910.606556395" watchObservedRunningTime="2025-12-13 07:01:16.747200729 +0000 UTC m=+1910.613795320" Dec 13 07:01:18 crc kubenswrapper[5048]: I1213 07:01:18.567323 5048 scope.go:117] "RemoveContainer" containerID="c3c3c010eb86b49ee1ff9e8da0dddc32f266e163f2ef609d402b38792e3c4862" Dec 13 07:01:19 crc kubenswrapper[5048]: I1213 07:01:19.754374 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-j7hns" event={"ID":"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b","Type":"ContainerStarted","Data":"53e69e2487d25cf71ad6303383d3f610cb5fd038d34d4d4b9e63796a8c974cf5"} Dec 13 07:01:21 crc kubenswrapper[5048]: I1213 07:01:21.774053 5048 generic.go:334] "Generic (PLEG): container finished" podID="6a5d7f38-72b9-4092-a948-c775ce64d40c" containerID="5dae3c4e93e612248f0e79b1407883c9d54ab7ac40e70c8fd3fe0819190afce5" exitCode=0 Dec 13 07:01:21 crc kubenswrapper[5048]: I1213 07:01:21.774160 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pw7g5" event={"ID":"6a5d7f38-72b9-4092-a948-c775ce64d40c","Type":"ContainerDied","Data":"5dae3c4e93e612248f0e79b1407883c9d54ab7ac40e70c8fd3fe0819190afce5"} Dec 13 07:01:23 crc kubenswrapper[5048]: I1213 07:01:23.228238 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pw7g5" Dec 13 07:01:23 crc kubenswrapper[5048]: I1213 07:01:23.333728 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6a5d7f38-72b9-4092-a948-c775ce64d40c-ssh-key\") pod \"6a5d7f38-72b9-4092-a948-c775ce64d40c\" (UID: \"6a5d7f38-72b9-4092-a948-c775ce64d40c\") " Dec 13 07:01:23 crc kubenswrapper[5048]: I1213 07:01:23.333808 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6a5d7f38-72b9-4092-a948-c775ce64d40c-inventory\") pod \"6a5d7f38-72b9-4092-a948-c775ce64d40c\" (UID: \"6a5d7f38-72b9-4092-a948-c775ce64d40c\") " Dec 13 07:01:23 crc kubenswrapper[5048]: I1213 07:01:23.334051 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4vc7q\" (UniqueName: \"kubernetes.io/projected/6a5d7f38-72b9-4092-a948-c775ce64d40c-kube-api-access-4vc7q\") pod \"6a5d7f38-72b9-4092-a948-c775ce64d40c\" (UID: \"6a5d7f38-72b9-4092-a948-c775ce64d40c\") " Dec 13 07:01:23 crc kubenswrapper[5048]: I1213 07:01:23.348592 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a5d7f38-72b9-4092-a948-c775ce64d40c-kube-api-access-4vc7q" (OuterVolumeSpecName: "kube-api-access-4vc7q") pod "6a5d7f38-72b9-4092-a948-c775ce64d40c" (UID: "6a5d7f38-72b9-4092-a948-c775ce64d40c"). InnerVolumeSpecName "kube-api-access-4vc7q". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 07:01:23 crc kubenswrapper[5048]: I1213 07:01:23.370628 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a5d7f38-72b9-4092-a948-c775ce64d40c-inventory" (OuterVolumeSpecName: "inventory") pod "6a5d7f38-72b9-4092-a948-c775ce64d40c" (UID: "6a5d7f38-72b9-4092-a948-c775ce64d40c"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:01:23 crc kubenswrapper[5048]: I1213 07:01:23.373772 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a5d7f38-72b9-4092-a948-c775ce64d40c-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "6a5d7f38-72b9-4092-a948-c775ce64d40c" (UID: "6a5d7f38-72b9-4092-a948-c775ce64d40c"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:01:23 crc kubenswrapper[5048]: I1213 07:01:23.436497 5048 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6a5d7f38-72b9-4092-a948-c775ce64d40c-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 13 07:01:23 crc kubenswrapper[5048]: I1213 07:01:23.436545 5048 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6a5d7f38-72b9-4092-a948-c775ce64d40c-inventory\") on node \"crc\" DevicePath \"\"" Dec 13 07:01:23 crc kubenswrapper[5048]: I1213 07:01:23.436558 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4vc7q\" (UniqueName: \"kubernetes.io/projected/6a5d7f38-72b9-4092-a948-c775ce64d40c-kube-api-access-4vc7q\") on node \"crc\" DevicePath \"\"" Dec 13 07:01:23 crc kubenswrapper[5048]: I1213 07:01:23.791758 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pw7g5" event={"ID":"6a5d7f38-72b9-4092-a948-c775ce64d40c","Type":"ContainerDied","Data":"e34ac91731f3e6259aef66bc424bce467a630e285e09f3edaf606a8dd948df76"} Dec 13 07:01:23 crc kubenswrapper[5048]: I1213 07:01:23.792135 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e34ac91731f3e6259aef66bc424bce467a630e285e09f3edaf606a8dd948df76" Dec 13 07:01:23 crc kubenswrapper[5048]: I1213 07:01:23.791796 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-pw7g5" Dec 13 07:01:23 crc kubenswrapper[5048]: I1213 07:01:23.971739 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-5bk98"] Dec 13 07:01:23 crc kubenswrapper[5048]: E1213 07:01:23.972166 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a5d7f38-72b9-4092-a948-c775ce64d40c" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Dec 13 07:01:23 crc kubenswrapper[5048]: I1213 07:01:23.972183 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a5d7f38-72b9-4092-a948-c775ce64d40c" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Dec 13 07:01:23 crc kubenswrapper[5048]: I1213 07:01:23.972377 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a5d7f38-72b9-4092-a948-c775ce64d40c" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Dec 13 07:01:23 crc kubenswrapper[5048]: I1213 07:01:23.973021 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-5bk98" Dec 13 07:01:23 crc kubenswrapper[5048]: I1213 07:01:23.976620 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 13 07:01:23 crc kubenswrapper[5048]: I1213 07:01:23.979535 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 13 07:01:23 crc kubenswrapper[5048]: I1213 07:01:23.979832 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 13 07:01:23 crc kubenswrapper[5048]: I1213 07:01:23.981303 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hgp7p" Dec 13 07:01:23 crc kubenswrapper[5048]: I1213 07:01:23.983980 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-5bk98"] Dec 13 07:01:24 crc kubenswrapper[5048]: I1213 07:01:24.051721 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/58f90e59-a2c6-4099-b5eb-6a35c0448a1f-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-5bk98\" (UID: \"58f90e59-a2c6-4099-b5eb-6a35c0448a1f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-5bk98" Dec 13 07:01:24 crc kubenswrapper[5048]: I1213 07:01:24.051813 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/58f90e59-a2c6-4099-b5eb-6a35c0448a1f-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-5bk98\" (UID: \"58f90e59-a2c6-4099-b5eb-6a35c0448a1f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-5bk98" Dec 13 07:01:24 crc kubenswrapper[5048]: I1213 07:01:24.051892 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tjht4\" (UniqueName: \"kubernetes.io/projected/58f90e59-a2c6-4099-b5eb-6a35c0448a1f-kube-api-access-tjht4\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-5bk98\" (UID: \"58f90e59-a2c6-4099-b5eb-6a35c0448a1f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-5bk98" Dec 13 07:01:24 crc kubenswrapper[5048]: I1213 07:01:24.153860 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/58f90e59-a2c6-4099-b5eb-6a35c0448a1f-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-5bk98\" (UID: \"58f90e59-a2c6-4099-b5eb-6a35c0448a1f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-5bk98" Dec 13 07:01:24 crc kubenswrapper[5048]: I1213 07:01:24.153937 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tjht4\" (UniqueName: \"kubernetes.io/projected/58f90e59-a2c6-4099-b5eb-6a35c0448a1f-kube-api-access-tjht4\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-5bk98\" (UID: \"58f90e59-a2c6-4099-b5eb-6a35c0448a1f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-5bk98" Dec 13 07:01:24 crc kubenswrapper[5048]: I1213 07:01:24.154043 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/58f90e59-a2c6-4099-b5eb-6a35c0448a1f-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-5bk98\" (UID: 
\"58f90e59-a2c6-4099-b5eb-6a35c0448a1f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-5bk98" Dec 13 07:01:24 crc kubenswrapper[5048]: I1213 07:01:24.158360 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/58f90e59-a2c6-4099-b5eb-6a35c0448a1f-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-5bk98\" (UID: \"58f90e59-a2c6-4099-b5eb-6a35c0448a1f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-5bk98" Dec 13 07:01:24 crc kubenswrapper[5048]: I1213 07:01:24.158572 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/58f90e59-a2c6-4099-b5eb-6a35c0448a1f-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-5bk98\" (UID: \"58f90e59-a2c6-4099-b5eb-6a35c0448a1f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-5bk98" Dec 13 07:01:24 crc kubenswrapper[5048]: I1213 07:01:24.182457 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tjht4\" (UniqueName: \"kubernetes.io/projected/58f90e59-a2c6-4099-b5eb-6a35c0448a1f-kube-api-access-tjht4\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-5bk98\" (UID: \"58f90e59-a2c6-4099-b5eb-6a35c0448a1f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-5bk98" Dec 13 07:01:24 crc kubenswrapper[5048]: I1213 07:01:24.293657 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-5bk98" Dec 13 07:01:24 crc kubenswrapper[5048]: I1213 07:01:24.853863 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-5bk98"] Dec 13 07:01:25 crc kubenswrapper[5048]: I1213 07:01:25.817699 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-5bk98" event={"ID":"58f90e59-a2c6-4099-b5eb-6a35c0448a1f","Type":"ContainerStarted","Data":"070f89cd630cafe436ef7d1990cca33bd150e1147ef04f8ef634baf11ffd6661"} Dec 13 07:01:25 crc kubenswrapper[5048]: I1213 07:01:25.818038 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-5bk98" event={"ID":"58f90e59-a2c6-4099-b5eb-6a35c0448a1f","Type":"ContainerStarted","Data":"badc9900c262bfb2ab52315a49e66132cca6392035e136050f93b0c11d97e188"} Dec 13 07:01:25 crc kubenswrapper[5048]: I1213 07:01:25.841088 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-5bk98" podStartSLOduration=2.410579234 podStartE2EDuration="2.841066304s" podCreationTimestamp="2025-12-13 07:01:23 +0000 UTC" firstStartedPulling="2025-12-13 07:01:24.856721137 +0000 UTC m=+1918.723315738" lastFinishedPulling="2025-12-13 07:01:25.287208207 +0000 UTC m=+1919.153802808" observedRunningTime="2025-12-13 07:01:25.839735979 +0000 UTC m=+1919.706330600" watchObservedRunningTime="2025-12-13 07:01:25.841066304 +0000 UTC m=+1919.707660895" Dec 13 07:01:31 crc kubenswrapper[5048]: I1213 07:01:31.065849 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-rgt9h"] Dec 13 07:01:31 crc kubenswrapper[5048]: I1213 07:01:31.084582 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-rgt9h"] Dec 13 07:01:32 crc kubenswrapper[5048]: I1213 07:01:32.579817 5048 kubelet_volumes.go:163] "Cleaned up 
orphaned pod volumes dir" podUID="cc8db113-49b5-41af-af25-3980140fc49d" path="/var/lib/kubelet/pods/cc8db113-49b5-41af-af25-3980140fc49d/volumes" Dec 13 07:01:48 crc kubenswrapper[5048]: I1213 07:01:48.048093 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-v2pzq"] Dec 13 07:01:48 crc kubenswrapper[5048]: I1213 07:01:48.057800 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-npmvk"] Dec 13 07:01:48 crc kubenswrapper[5048]: I1213 07:01:48.067802 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-npmvk"] Dec 13 07:01:48 crc kubenswrapper[5048]: I1213 07:01:48.074551 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-v2pzq"] Dec 13 07:01:48 crc kubenswrapper[5048]: I1213 07:01:48.594139 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d733f09-6f54-4b02-b44e-81bc02321044" path="/var/lib/kubelet/pods/6d733f09-6f54-4b02-b44e-81bc02321044/volumes" Dec 13 07:01:48 crc kubenswrapper[5048]: I1213 07:01:48.595564 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8b3c1380-a6bf-4bf2-b648-1e548fb07c12" path="/var/lib/kubelet/pods/8b3c1380-a6bf-4bf2-b648-1e548fb07c12/volumes" Dec 13 07:01:53 crc kubenswrapper[5048]: I1213 07:01:53.166969 5048 scope.go:117] "RemoveContainer" containerID="1836452fa3d42974628dbe02f1341f7df3a4d171511d8f43b115e56ea653baa0" Dec 13 07:01:53 crc kubenswrapper[5048]: I1213 07:01:53.190034 5048 scope.go:117] "RemoveContainer" containerID="d489cd812c2b014edbb21a775e2639f571052b8fd1b18112b52c43c14f1dbc64" Dec 13 07:01:53 crc kubenswrapper[5048]: I1213 07:01:53.216967 5048 scope.go:117] "RemoveContainer" containerID="fcdc0428ff0c65f5b4967dd59f7ba95449904abbc314e067bfbb6e57d8f923ee" Dec 13 07:01:53 crc kubenswrapper[5048]: I1213 07:01:53.313373 5048 scope.go:117] "RemoveContainer" containerID="509b9f55120eec3f4d51daaf8e43854a9b210d1f4e1d4f676355369fdb0f5949" Dec 13 07:01:53 crc kubenswrapper[5048]: I1213 07:01:53.344770 5048 scope.go:117] "RemoveContainer" containerID="b69d3fabd5b57a9e5f2d773f052a1db758f3b06eda0c08db7ce4ebd5ecc6784b" Dec 13 07:01:53 crc kubenswrapper[5048]: I1213 07:01:53.390527 5048 scope.go:117] "RemoveContainer" containerID="e765a8ba38d5b50a76fd8a443424d65d63bdd3325c768dfa3ddc6b59ee7aa616" Dec 13 07:01:53 crc kubenswrapper[5048]: I1213 07:01:53.439038 5048 scope.go:117] "RemoveContainer" containerID="fa48d722834768085e81c545be3ea57e6100150ff87da89799a5d0bdae19c0d6" Dec 13 07:01:53 crc kubenswrapper[5048]: I1213 07:01:53.475214 5048 scope.go:117] "RemoveContainer" containerID="2b9a2f24de943544ad9499b48208b1f382c3bfa4b625bdf0af56acfb6bd6232a" Dec 13 07:01:53 crc kubenswrapper[5048]: I1213 07:01:53.516786 5048 scope.go:117] "RemoveContainer" containerID="e758178fd2b3c4b94a78172706bea64d76e51dee0040adad084fd4253868ed82" Dec 13 07:01:53 crc kubenswrapper[5048]: I1213 07:01:53.554789 5048 scope.go:117] "RemoveContainer" containerID="364a211cd614b412277535f25bd55c09554bfe33e018dd0e2294b3f08d7fb1a5" Dec 13 07:01:53 crc kubenswrapper[5048]: I1213 07:01:53.573934 5048 scope.go:117] "RemoveContainer" containerID="869133386f138ce3e83d7c0462807aa4e2748337faf591013e0f304dcaaa255c" Dec 13 07:01:53 crc kubenswrapper[5048]: I1213 07:01:53.594789 5048 scope.go:117] "RemoveContainer" containerID="82b7d04cc4842f3ec347f5de3aec97895d8b06b0b5c32ac67d2ef77c93994b9f" Dec 13 07:02:02 crc kubenswrapper[5048]: I1213 07:02:02.158309 5048 generic.go:334] 
"Generic (PLEG): container finished" podID="58f90e59-a2c6-4099-b5eb-6a35c0448a1f" containerID="070f89cd630cafe436ef7d1990cca33bd150e1147ef04f8ef634baf11ffd6661" exitCode=0 Dec 13 07:02:02 crc kubenswrapper[5048]: I1213 07:02:02.158385 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-5bk98" event={"ID":"58f90e59-a2c6-4099-b5eb-6a35c0448a1f","Type":"ContainerDied","Data":"070f89cd630cafe436ef7d1990cca33bd150e1147ef04f8ef634baf11ffd6661"} Dec 13 07:02:03 crc kubenswrapper[5048]: I1213 07:02:03.561674 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-5bk98" Dec 13 07:02:03 crc kubenswrapper[5048]: I1213 07:02:03.730257 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/58f90e59-a2c6-4099-b5eb-6a35c0448a1f-inventory\") pod \"58f90e59-a2c6-4099-b5eb-6a35c0448a1f\" (UID: \"58f90e59-a2c6-4099-b5eb-6a35c0448a1f\") " Dec 13 07:02:03 crc kubenswrapper[5048]: I1213 07:02:03.730449 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tjht4\" (UniqueName: \"kubernetes.io/projected/58f90e59-a2c6-4099-b5eb-6a35c0448a1f-kube-api-access-tjht4\") pod \"58f90e59-a2c6-4099-b5eb-6a35c0448a1f\" (UID: \"58f90e59-a2c6-4099-b5eb-6a35c0448a1f\") " Dec 13 07:02:03 crc kubenswrapper[5048]: I1213 07:02:03.730492 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/58f90e59-a2c6-4099-b5eb-6a35c0448a1f-ssh-key\") pod \"58f90e59-a2c6-4099-b5eb-6a35c0448a1f\" (UID: \"58f90e59-a2c6-4099-b5eb-6a35c0448a1f\") " Dec 13 07:02:03 crc kubenswrapper[5048]: I1213 07:02:03.738578 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58f90e59-a2c6-4099-b5eb-6a35c0448a1f-kube-api-access-tjht4" (OuterVolumeSpecName: "kube-api-access-tjht4") pod "58f90e59-a2c6-4099-b5eb-6a35c0448a1f" (UID: "58f90e59-a2c6-4099-b5eb-6a35c0448a1f"). InnerVolumeSpecName "kube-api-access-tjht4". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 07:02:03 crc kubenswrapper[5048]: I1213 07:02:03.757431 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/58f90e59-a2c6-4099-b5eb-6a35c0448a1f-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "58f90e59-a2c6-4099-b5eb-6a35c0448a1f" (UID: "58f90e59-a2c6-4099-b5eb-6a35c0448a1f"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:02:03 crc kubenswrapper[5048]: I1213 07:02:03.760267 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/58f90e59-a2c6-4099-b5eb-6a35c0448a1f-inventory" (OuterVolumeSpecName: "inventory") pod "58f90e59-a2c6-4099-b5eb-6a35c0448a1f" (UID: "58f90e59-a2c6-4099-b5eb-6a35c0448a1f"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:02:03 crc kubenswrapper[5048]: I1213 07:02:03.832852 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tjht4\" (UniqueName: \"kubernetes.io/projected/58f90e59-a2c6-4099-b5eb-6a35c0448a1f-kube-api-access-tjht4\") on node \"crc\" DevicePath \"\"" Dec 13 07:02:03 crc kubenswrapper[5048]: I1213 07:02:03.832892 5048 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/58f90e59-a2c6-4099-b5eb-6a35c0448a1f-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 13 07:02:03 crc kubenswrapper[5048]: I1213 07:02:03.832905 5048 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/58f90e59-a2c6-4099-b5eb-6a35c0448a1f-inventory\") on node \"crc\" DevicePath \"\"" Dec 13 07:02:04 crc kubenswrapper[5048]: I1213 07:02:04.179084 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-5bk98" event={"ID":"58f90e59-a2c6-4099-b5eb-6a35c0448a1f","Type":"ContainerDied","Data":"badc9900c262bfb2ab52315a49e66132cca6392035e136050f93b0c11d97e188"} Dec 13 07:02:04 crc kubenswrapper[5048]: I1213 07:02:04.179132 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="badc9900c262bfb2ab52315a49e66132cca6392035e136050f93b0c11d97e188" Dec 13 07:02:04 crc kubenswrapper[5048]: I1213 07:02:04.179141 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-5bk98" Dec 13 07:02:04 crc kubenswrapper[5048]: I1213 07:02:04.275290 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wcwx7"] Dec 13 07:02:04 crc kubenswrapper[5048]: E1213 07:02:04.275700 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58f90e59-a2c6-4099-b5eb-6a35c0448a1f" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Dec 13 07:02:04 crc kubenswrapper[5048]: I1213 07:02:04.275718 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="58f90e59-a2c6-4099-b5eb-6a35c0448a1f" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Dec 13 07:02:04 crc kubenswrapper[5048]: I1213 07:02:04.275883 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="58f90e59-a2c6-4099-b5eb-6a35c0448a1f" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Dec 13 07:02:04 crc kubenswrapper[5048]: I1213 07:02:04.276541 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wcwx7" Dec 13 07:02:04 crc kubenswrapper[5048]: I1213 07:02:04.278367 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 13 07:02:04 crc kubenswrapper[5048]: I1213 07:02:04.278479 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 13 07:02:04 crc kubenswrapper[5048]: I1213 07:02:04.279807 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 13 07:02:04 crc kubenswrapper[5048]: I1213 07:02:04.285401 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wcwx7"] Dec 13 07:02:04 crc kubenswrapper[5048]: I1213 07:02:04.285797 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hgp7p" Dec 13 07:02:04 crc kubenswrapper[5048]: I1213 07:02:04.444059 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g7xz5\" (UniqueName: \"kubernetes.io/projected/cfd8e6ef-8724-4363-8a56-71f2b0f24f15-kube-api-access-g7xz5\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-wcwx7\" (UID: \"cfd8e6ef-8724-4363-8a56-71f2b0f24f15\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wcwx7" Dec 13 07:02:04 crc kubenswrapper[5048]: I1213 07:02:04.444134 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cfd8e6ef-8724-4363-8a56-71f2b0f24f15-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-wcwx7\" (UID: \"cfd8e6ef-8724-4363-8a56-71f2b0f24f15\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wcwx7" Dec 13 07:02:04 crc kubenswrapper[5048]: I1213 07:02:04.444289 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cfd8e6ef-8724-4363-8a56-71f2b0f24f15-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-wcwx7\" (UID: \"cfd8e6ef-8724-4363-8a56-71f2b0f24f15\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wcwx7" Dec 13 07:02:04 crc kubenswrapper[5048]: I1213 07:02:04.546333 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cfd8e6ef-8724-4363-8a56-71f2b0f24f15-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-wcwx7\" (UID: \"cfd8e6ef-8724-4363-8a56-71f2b0f24f15\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wcwx7" Dec 13 07:02:04 crc kubenswrapper[5048]: I1213 07:02:04.546857 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g7xz5\" (UniqueName: \"kubernetes.io/projected/cfd8e6ef-8724-4363-8a56-71f2b0f24f15-kube-api-access-g7xz5\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-wcwx7\" (UID: \"cfd8e6ef-8724-4363-8a56-71f2b0f24f15\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wcwx7" Dec 13 07:02:04 crc kubenswrapper[5048]: I1213 07:02:04.546929 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cfd8e6ef-8724-4363-8a56-71f2b0f24f15-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-wcwx7\" 
(UID: \"cfd8e6ef-8724-4363-8a56-71f2b0f24f15\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wcwx7" Dec 13 07:02:04 crc kubenswrapper[5048]: I1213 07:02:04.551221 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cfd8e6ef-8724-4363-8a56-71f2b0f24f15-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-wcwx7\" (UID: \"cfd8e6ef-8724-4363-8a56-71f2b0f24f15\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wcwx7" Dec 13 07:02:04 crc kubenswrapper[5048]: I1213 07:02:04.551420 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cfd8e6ef-8724-4363-8a56-71f2b0f24f15-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-wcwx7\" (UID: \"cfd8e6ef-8724-4363-8a56-71f2b0f24f15\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wcwx7" Dec 13 07:02:04 crc kubenswrapper[5048]: I1213 07:02:04.566772 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g7xz5\" (UniqueName: \"kubernetes.io/projected/cfd8e6ef-8724-4363-8a56-71f2b0f24f15-kube-api-access-g7xz5\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-wcwx7\" (UID: \"cfd8e6ef-8724-4363-8a56-71f2b0f24f15\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wcwx7" Dec 13 07:02:04 crc kubenswrapper[5048]: I1213 07:02:04.597515 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wcwx7" Dec 13 07:02:05 crc kubenswrapper[5048]: I1213 07:02:05.134512 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wcwx7"] Dec 13 07:02:05 crc kubenswrapper[5048]: I1213 07:02:05.188809 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wcwx7" event={"ID":"cfd8e6ef-8724-4363-8a56-71f2b0f24f15","Type":"ContainerStarted","Data":"35c216f0db33e54939f27ffb659f69976bb8a5ece089a850401cbe199ca5ced0"} Dec 13 07:02:07 crc kubenswrapper[5048]: I1213 07:02:07.209288 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wcwx7" event={"ID":"cfd8e6ef-8724-4363-8a56-71f2b0f24f15","Type":"ContainerStarted","Data":"d60fd4bc75d4a6f9b3f72c14a251d99d4126daa1876090e6e2f3c56804fef994"} Dec 13 07:02:07 crc kubenswrapper[5048]: I1213 07:02:07.228307 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wcwx7" podStartSLOduration=2.282130263 podStartE2EDuration="3.228282612s" podCreationTimestamp="2025-12-13 07:02:04 +0000 UTC" firstStartedPulling="2025-12-13 07:02:05.136067199 +0000 UTC m=+1959.002661780" lastFinishedPulling="2025-12-13 07:02:06.082219508 +0000 UTC m=+1959.948814129" observedRunningTime="2025-12-13 07:02:07.225831396 +0000 UTC m=+1961.092425997" watchObservedRunningTime="2025-12-13 07:02:07.228282612 +0000 UTC m=+1961.094877193" Dec 13 07:02:33 crc kubenswrapper[5048]: I1213 07:02:33.049198 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-r4vpp"] Dec 13 07:02:33 crc kubenswrapper[5048]: I1213 07:02:33.080256 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-r4vpp"] Dec 13 07:02:34 crc kubenswrapper[5048]: I1213 07:02:34.577328 5048 
Dec 13 07:02:34 crc kubenswrapper[5048]: I1213 07:02:34.577328 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1b83ee75-f7b6-4c4e-9439-49b4646efc16" path="/var/lib/kubelet/pods/1b83ee75-f7b6-4c4e-9439-49b4646efc16/volumes"
Dec 13 07:02:53 crc kubenswrapper[5048]: I1213 07:02:53.635625 5048 generic.go:334] "Generic (PLEG): container finished" podID="cfd8e6ef-8724-4363-8a56-71f2b0f24f15" containerID="d60fd4bc75d4a6f9b3f72c14a251d99d4126daa1876090e6e2f3c56804fef994" exitCode=0
Dec 13 07:02:53 crc kubenswrapper[5048]: I1213 07:02:53.635691 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wcwx7" event={"ID":"cfd8e6ef-8724-4363-8a56-71f2b0f24f15","Type":"ContainerDied","Data":"d60fd4bc75d4a6f9b3f72c14a251d99d4126daa1876090e6e2f3c56804fef994"}
Dec 13 07:02:53 crc kubenswrapper[5048]: I1213 07:02:53.806845 5048 scope.go:117] "RemoveContainer" containerID="612298789e3bc2581412496aaca87fcd6bd3d941aa5ac33cc9659c35ad3ee290"
Dec 13 07:02:55 crc kubenswrapper[5048]: I1213 07:02:55.037906 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wcwx7"
Dec 13 07:02:55 crc kubenswrapper[5048]: I1213 07:02:55.227610 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cfd8e6ef-8724-4363-8a56-71f2b0f24f15-ssh-key\") pod \"cfd8e6ef-8724-4363-8a56-71f2b0f24f15\" (UID: \"cfd8e6ef-8724-4363-8a56-71f2b0f24f15\") "
Dec 13 07:02:55 crc kubenswrapper[5048]: I1213 07:02:55.227715 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g7xz5\" (UniqueName: \"kubernetes.io/projected/cfd8e6ef-8724-4363-8a56-71f2b0f24f15-kube-api-access-g7xz5\") pod \"cfd8e6ef-8724-4363-8a56-71f2b0f24f15\" (UID: \"cfd8e6ef-8724-4363-8a56-71f2b0f24f15\") "
Dec 13 07:02:55 crc kubenswrapper[5048]: I1213 07:02:55.227786 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cfd8e6ef-8724-4363-8a56-71f2b0f24f15-inventory\") pod \"cfd8e6ef-8724-4363-8a56-71f2b0f24f15\" (UID: \"cfd8e6ef-8724-4363-8a56-71f2b0f24f15\") "
Dec 13 07:02:55 crc kubenswrapper[5048]: I1213 07:02:55.240981 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cfd8e6ef-8724-4363-8a56-71f2b0f24f15-kube-api-access-g7xz5" (OuterVolumeSpecName: "kube-api-access-g7xz5") pod "cfd8e6ef-8724-4363-8a56-71f2b0f24f15" (UID: "cfd8e6ef-8724-4363-8a56-71f2b0f24f15"). InnerVolumeSpecName "kube-api-access-g7xz5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 07:02:55 crc kubenswrapper[5048]: I1213 07:02:55.269172 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cfd8e6ef-8724-4363-8a56-71f2b0f24f15-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "cfd8e6ef-8724-4363-8a56-71f2b0f24f15" (UID: "cfd8e6ef-8724-4363-8a56-71f2b0f24f15"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 07:02:55 crc kubenswrapper[5048]: I1213 07:02:55.269479 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cfd8e6ef-8724-4363-8a56-71f2b0f24f15-inventory" (OuterVolumeSpecName: "inventory") pod "cfd8e6ef-8724-4363-8a56-71f2b0f24f15" (UID: "cfd8e6ef-8724-4363-8a56-71f2b0f24f15"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 07:02:55 crc kubenswrapper[5048]: I1213 07:02:55.329760 5048 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cfd8e6ef-8724-4363-8a56-71f2b0f24f15-ssh-key\") on node \"crc\" DevicePath \"\""
Dec 13 07:02:55 crc kubenswrapper[5048]: I1213 07:02:55.329800 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g7xz5\" (UniqueName: \"kubernetes.io/projected/cfd8e6ef-8724-4363-8a56-71f2b0f24f15-kube-api-access-g7xz5\") on node \"crc\" DevicePath \"\""
Dec 13 07:02:55 crc kubenswrapper[5048]: I1213 07:02:55.329813 5048 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cfd8e6ef-8724-4363-8a56-71f2b0f24f15-inventory\") on node \"crc\" DevicePath \"\""
Dec 13 07:02:55 crc kubenswrapper[5048]: I1213 07:02:55.655555 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wcwx7" event={"ID":"cfd8e6ef-8724-4363-8a56-71f2b0f24f15","Type":"ContainerDied","Data":"35c216f0db33e54939f27ffb659f69976bb8a5ece089a850401cbe199ca5ced0"}
Dec 13 07:02:55 crc kubenswrapper[5048]: I1213 07:02:55.655602 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="35c216f0db33e54939f27ffb659f69976bb8a5ece089a850401cbe199ca5ced0"
Dec 13 07:02:55 crc kubenswrapper[5048]: I1213 07:02:55.655572 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wcwx7"
Dec 13 07:02:55 crc kubenswrapper[5048]: I1213 07:02:55.741341 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-mwrz4"]
Dec 13 07:02:55 crc kubenswrapper[5048]: E1213 07:02:55.741836 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cfd8e6ef-8724-4363-8a56-71f2b0f24f15" containerName="configure-os-edpm-deployment-openstack-edpm-ipam"
Dec 13 07:02:55 crc kubenswrapper[5048]: I1213 07:02:55.741868 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="cfd8e6ef-8724-4363-8a56-71f2b0f24f15" containerName="configure-os-edpm-deployment-openstack-edpm-ipam"
Dec 13 07:02:55 crc kubenswrapper[5048]: I1213 07:02:55.742122 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="cfd8e6ef-8724-4363-8a56-71f2b0f24f15" containerName="configure-os-edpm-deployment-openstack-edpm-ipam"
Dec 13 07:02:55 crc kubenswrapper[5048]: I1213 07:02:55.742884 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-mwrz4"
Dec 13 07:02:55 crc kubenswrapper[5048]: I1213 07:02:55.748399 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Dec 13 07:02:55 crc kubenswrapper[5048]: I1213 07:02:55.748629 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hgp7p"
Dec 13 07:02:55 crc kubenswrapper[5048]: I1213 07:02:55.748781 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Dec 13 07:02:55 crc kubenswrapper[5048]: I1213 07:02:55.749015 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Dec 13 07:02:55 crc kubenswrapper[5048]: I1213 07:02:55.782877 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-mwrz4"]
Dec 13 07:02:55 crc kubenswrapper[5048]: I1213 07:02:55.939689 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2k7gv\" (UniqueName: \"kubernetes.io/projected/a097705b-8b45-470f-8026-744ebdc4083a-kube-api-access-2k7gv\") pod \"ssh-known-hosts-edpm-deployment-mwrz4\" (UID: \"a097705b-8b45-470f-8026-744ebdc4083a\") " pod="openstack/ssh-known-hosts-edpm-deployment-mwrz4"
Dec 13 07:02:55 crc kubenswrapper[5048]: I1213 07:02:55.940152 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a097705b-8b45-470f-8026-744ebdc4083a-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-mwrz4\" (UID: \"a097705b-8b45-470f-8026-744ebdc4083a\") " pod="openstack/ssh-known-hosts-edpm-deployment-mwrz4"
Dec 13 07:02:55 crc kubenswrapper[5048]: I1213 07:02:55.940473 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/a097705b-8b45-470f-8026-744ebdc4083a-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-mwrz4\" (UID: \"a097705b-8b45-470f-8026-744ebdc4083a\") " pod="openstack/ssh-known-hosts-edpm-deployment-mwrz4"
Dec 13 07:02:56 crc kubenswrapper[5048]: I1213 07:02:56.042073 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a097705b-8b45-470f-8026-744ebdc4083a-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-mwrz4\" (UID: \"a097705b-8b45-470f-8026-744ebdc4083a\") " pod="openstack/ssh-known-hosts-edpm-deployment-mwrz4"
Dec 13 07:02:56 crc kubenswrapper[5048]: I1213 07:02:56.042418 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/a097705b-8b45-470f-8026-744ebdc4083a-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-mwrz4\" (UID: \"a097705b-8b45-470f-8026-744ebdc4083a\") " pod="openstack/ssh-known-hosts-edpm-deployment-mwrz4"
Dec 13 07:02:56 crc kubenswrapper[5048]: I1213 07:02:56.042536 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2k7gv\" (UniqueName: \"kubernetes.io/projected/a097705b-8b45-470f-8026-744ebdc4083a-kube-api-access-2k7gv\") pod \"ssh-known-hosts-edpm-deployment-mwrz4\" (UID: \"a097705b-8b45-470f-8026-744ebdc4083a\") " pod="openstack/ssh-known-hosts-edpm-deployment-mwrz4"
Dec 13 07:02:56 crc kubenswrapper[5048]: I1213 07:02:56.046354 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a097705b-8b45-470f-8026-744ebdc4083a-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-mwrz4\" (UID: \"a097705b-8b45-470f-8026-744ebdc4083a\") " pod="openstack/ssh-known-hosts-edpm-deployment-mwrz4"
Dec 13 07:02:56 crc kubenswrapper[5048]: I1213 07:02:56.051929 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/a097705b-8b45-470f-8026-744ebdc4083a-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-mwrz4\" (UID: \"a097705b-8b45-470f-8026-744ebdc4083a\") " pod="openstack/ssh-known-hosts-edpm-deployment-mwrz4"
Dec 13 07:02:56 crc kubenswrapper[5048]: I1213 07:02:56.058234 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2k7gv\" (UniqueName: \"kubernetes.io/projected/a097705b-8b45-470f-8026-744ebdc4083a-kube-api-access-2k7gv\") pod \"ssh-known-hosts-edpm-deployment-mwrz4\" (UID: \"a097705b-8b45-470f-8026-744ebdc4083a\") " pod="openstack/ssh-known-hosts-edpm-deployment-mwrz4"
Dec 13 07:02:56 crc kubenswrapper[5048]: I1213 07:02:56.120410 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-mwrz4"
Dec 13 07:02:56 crc kubenswrapper[5048]: I1213 07:02:56.660811 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-mwrz4"]
Dec 13 07:02:57 crc kubenswrapper[5048]: I1213 07:02:57.676152 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-mwrz4" event={"ID":"a097705b-8b45-470f-8026-744ebdc4083a","Type":"ContainerStarted","Data":"7f0ad10cc421c15de9d3fbc1e95119eadbd35dbad1819f6169dced58d871244d"}
Dec 13 07:02:57 crc kubenswrapper[5048]: I1213 07:02:57.676499 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-mwrz4" event={"ID":"a097705b-8b45-470f-8026-744ebdc4083a","Type":"ContainerStarted","Data":"2490000a6070b91dbd7f34f3a8c37c54812b20f77d0bb1ab3a4195a9135c8858"}
Dec 13 07:02:57 crc kubenswrapper[5048]: I1213 07:02:57.703955 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-mwrz4" podStartSLOduration=1.957245117 podStartE2EDuration="2.703937922s" podCreationTimestamp="2025-12-13 07:02:55 +0000 UTC" firstStartedPulling="2025-12-13 07:02:56.668698044 +0000 UTC m=+2010.535292625" lastFinishedPulling="2025-12-13 07:02:57.415390839 +0000 UTC m=+2011.281985430" observedRunningTime="2025-12-13 07:02:57.696164283 +0000 UTC m=+2011.562758874" watchObservedRunningTime="2025-12-13 07:02:57.703937922 +0000 UTC m=+2011.570532503"
Dec 13 07:03:04 crc kubenswrapper[5048]: I1213 07:03:04.750430 5048 generic.go:334] "Generic (PLEG): container finished" podID="a097705b-8b45-470f-8026-744ebdc4083a" containerID="7f0ad10cc421c15de9d3fbc1e95119eadbd35dbad1819f6169dced58d871244d" exitCode=0
Dec 13 07:03:04 crc kubenswrapper[5048]: I1213 07:03:04.750605 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-mwrz4" event={"ID":"a097705b-8b45-470f-8026-744ebdc4083a","Type":"ContainerDied","Data":"7f0ad10cc421c15de9d3fbc1e95119eadbd35dbad1819f6169dced58d871244d"}
Dec 13 07:03:06 crc kubenswrapper[5048]: I1213 07:03:06.235909 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-mwrz4"
Dec 13 07:03:06 crc kubenswrapper[5048]: I1213 07:03:06.359896 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/a097705b-8b45-470f-8026-744ebdc4083a-inventory-0\") pod \"a097705b-8b45-470f-8026-744ebdc4083a\" (UID: \"a097705b-8b45-470f-8026-744ebdc4083a\") "
Dec 13 07:03:06 crc kubenswrapper[5048]: I1213 07:03:06.360117 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2k7gv\" (UniqueName: \"kubernetes.io/projected/a097705b-8b45-470f-8026-744ebdc4083a-kube-api-access-2k7gv\") pod \"a097705b-8b45-470f-8026-744ebdc4083a\" (UID: \"a097705b-8b45-470f-8026-744ebdc4083a\") "
Dec 13 07:03:06 crc kubenswrapper[5048]: I1213 07:03:06.360149 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a097705b-8b45-470f-8026-744ebdc4083a-ssh-key-openstack-edpm-ipam\") pod \"a097705b-8b45-470f-8026-744ebdc4083a\" (UID: \"a097705b-8b45-470f-8026-744ebdc4083a\") "
Dec 13 07:03:06 crc kubenswrapper[5048]: I1213 07:03:06.371664 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a097705b-8b45-470f-8026-744ebdc4083a-kube-api-access-2k7gv" (OuterVolumeSpecName: "kube-api-access-2k7gv") pod "a097705b-8b45-470f-8026-744ebdc4083a" (UID: "a097705b-8b45-470f-8026-744ebdc4083a"). InnerVolumeSpecName "kube-api-access-2k7gv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 07:03:06 crc kubenswrapper[5048]: I1213 07:03:06.390582 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a097705b-8b45-470f-8026-744ebdc4083a-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "a097705b-8b45-470f-8026-744ebdc4083a" (UID: "a097705b-8b45-470f-8026-744ebdc4083a"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 07:03:06 crc kubenswrapper[5048]: I1213 07:03:06.392999 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a097705b-8b45-470f-8026-744ebdc4083a-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "a097705b-8b45-470f-8026-744ebdc4083a" (UID: "a097705b-8b45-470f-8026-744ebdc4083a"). InnerVolumeSpecName "inventory-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 07:03:06 crc kubenswrapper[5048]: I1213 07:03:06.462387 5048 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a097705b-8b45-470f-8026-744ebdc4083a-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Dec 13 07:03:06 crc kubenswrapper[5048]: I1213 07:03:06.462470 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2k7gv\" (UniqueName: \"kubernetes.io/projected/a097705b-8b45-470f-8026-744ebdc4083a-kube-api-access-2k7gv\") on node \"crc\" DevicePath \"\""
Dec 13 07:03:06 crc kubenswrapper[5048]: I1213 07:03:06.462489 5048 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/a097705b-8b45-470f-8026-744ebdc4083a-inventory-0\") on node \"crc\" DevicePath \"\""
Dec 13 07:03:06 crc kubenswrapper[5048]: I1213 07:03:06.767028 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-mwrz4" event={"ID":"a097705b-8b45-470f-8026-744ebdc4083a","Type":"ContainerDied","Data":"2490000a6070b91dbd7f34f3a8c37c54812b20f77d0bb1ab3a4195a9135c8858"}
Dec 13 07:03:06 crc kubenswrapper[5048]: I1213 07:03:06.767074 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2490000a6070b91dbd7f34f3a8c37c54812b20f77d0bb1ab3a4195a9135c8858"
Dec 13 07:03:06 crc kubenswrapper[5048]: I1213 07:03:06.767087 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-mwrz4"
Dec 13 07:03:06 crc kubenswrapper[5048]: I1213 07:03:06.835060 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-42g5n"]
Dec 13 07:03:06 crc kubenswrapper[5048]: E1213 07:03:06.835492 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a097705b-8b45-470f-8026-744ebdc4083a" containerName="ssh-known-hosts-edpm-deployment"
Dec 13 07:03:06 crc kubenswrapper[5048]: I1213 07:03:06.835510 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="a097705b-8b45-470f-8026-744ebdc4083a" containerName="ssh-known-hosts-edpm-deployment"
Dec 13 07:03:06 crc kubenswrapper[5048]: I1213 07:03:06.835681 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="a097705b-8b45-470f-8026-744ebdc4083a" containerName="ssh-known-hosts-edpm-deployment"
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-42g5n" Dec 13 07:03:06 crc kubenswrapper[5048]: I1213 07:03:06.844565 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 13 07:03:06 crc kubenswrapper[5048]: I1213 07:03:06.844905 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 13 07:03:06 crc kubenswrapper[5048]: I1213 07:03:06.845036 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 13 07:03:06 crc kubenswrapper[5048]: I1213 07:03:06.845084 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hgp7p" Dec 13 07:03:06 crc kubenswrapper[5048]: I1213 07:03:06.846631 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-42g5n"] Dec 13 07:03:06 crc kubenswrapper[5048]: I1213 07:03:06.872394 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bc147090-461f-4896-a08e-59dddc7c14cc-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-42g5n\" (UID: \"bc147090-461f-4896-a08e-59dddc7c14cc\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-42g5n" Dec 13 07:03:06 crc kubenswrapper[5048]: I1213 07:03:06.872864 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8bkhc\" (UniqueName: \"kubernetes.io/projected/bc147090-461f-4896-a08e-59dddc7c14cc-kube-api-access-8bkhc\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-42g5n\" (UID: \"bc147090-461f-4896-a08e-59dddc7c14cc\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-42g5n" Dec 13 07:03:06 crc kubenswrapper[5048]: I1213 07:03:06.872999 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bc147090-461f-4896-a08e-59dddc7c14cc-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-42g5n\" (UID: \"bc147090-461f-4896-a08e-59dddc7c14cc\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-42g5n" Dec 13 07:03:06 crc kubenswrapper[5048]: I1213 07:03:06.974149 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8bkhc\" (UniqueName: \"kubernetes.io/projected/bc147090-461f-4896-a08e-59dddc7c14cc-kube-api-access-8bkhc\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-42g5n\" (UID: \"bc147090-461f-4896-a08e-59dddc7c14cc\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-42g5n" Dec 13 07:03:06 crc kubenswrapper[5048]: I1213 07:03:06.974222 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bc147090-461f-4896-a08e-59dddc7c14cc-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-42g5n\" (UID: \"bc147090-461f-4896-a08e-59dddc7c14cc\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-42g5n" Dec 13 07:03:06 crc kubenswrapper[5048]: I1213 07:03:06.974301 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bc147090-461f-4896-a08e-59dddc7c14cc-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-42g5n\" (UID: \"bc147090-461f-4896-a08e-59dddc7c14cc\") " 
pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-42g5n" Dec 13 07:03:06 crc kubenswrapper[5048]: I1213 07:03:06.979516 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bc147090-461f-4896-a08e-59dddc7c14cc-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-42g5n\" (UID: \"bc147090-461f-4896-a08e-59dddc7c14cc\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-42g5n" Dec 13 07:03:06 crc kubenswrapper[5048]: I1213 07:03:06.988903 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bc147090-461f-4896-a08e-59dddc7c14cc-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-42g5n\" (UID: \"bc147090-461f-4896-a08e-59dddc7c14cc\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-42g5n" Dec 13 07:03:06 crc kubenswrapper[5048]: I1213 07:03:06.991764 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8bkhc\" (UniqueName: \"kubernetes.io/projected/bc147090-461f-4896-a08e-59dddc7c14cc-kube-api-access-8bkhc\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-42g5n\" (UID: \"bc147090-461f-4896-a08e-59dddc7c14cc\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-42g5n" Dec 13 07:03:07 crc kubenswrapper[5048]: I1213 07:03:07.157270 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-42g5n" Dec 13 07:03:07 crc kubenswrapper[5048]: I1213 07:03:07.653176 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-42g5n"] Dec 13 07:03:07 crc kubenswrapper[5048]: I1213 07:03:07.776159 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-42g5n" event={"ID":"bc147090-461f-4896-a08e-59dddc7c14cc","Type":"ContainerStarted","Data":"e1ee1948a4fa43a744ac11f700ce0b49fa178e6159147388171c8dee7eb7f4c3"} Dec 13 07:03:09 crc kubenswrapper[5048]: I1213 07:03:09.808964 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-42g5n" event={"ID":"bc147090-461f-4896-a08e-59dddc7c14cc","Type":"ContainerStarted","Data":"7cbf0147a95059a941a227cd8ab368ac270783a2170758bdbabf6e14d01f785a"} Dec 13 07:03:09 crc kubenswrapper[5048]: I1213 07:03:09.837571 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-42g5n" podStartSLOduration=2.803237522 podStartE2EDuration="3.837547044s" podCreationTimestamp="2025-12-13 07:03:06 +0000 UTC" firstStartedPulling="2025-12-13 07:03:07.659635223 +0000 UTC m=+2021.526229804" lastFinishedPulling="2025-12-13 07:03:08.693944745 +0000 UTC m=+2022.560539326" observedRunningTime="2025-12-13 07:03:09.832397044 +0000 UTC m=+2023.698991675" watchObservedRunningTime="2025-12-13 07:03:09.837547044 +0000 UTC m=+2023.704141625" Dec 13 07:03:16 crc kubenswrapper[5048]: I1213 07:03:16.870017 5048 generic.go:334] "Generic (PLEG): container finished" podID="bc147090-461f-4896-a08e-59dddc7c14cc" containerID="7cbf0147a95059a941a227cd8ab368ac270783a2170758bdbabf6e14d01f785a" exitCode=0 Dec 13 07:03:16 crc kubenswrapper[5048]: I1213 07:03:16.870129 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-42g5n" 
event={"ID":"bc147090-461f-4896-a08e-59dddc7c14cc","Type":"ContainerDied","Data":"7cbf0147a95059a941a227cd8ab368ac270783a2170758bdbabf6e14d01f785a"} Dec 13 07:03:18 crc kubenswrapper[5048]: I1213 07:03:18.292528 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-42g5n" Dec 13 07:03:18 crc kubenswrapper[5048]: I1213 07:03:18.303815 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bc147090-461f-4896-a08e-59dddc7c14cc-ssh-key\") pod \"bc147090-461f-4896-a08e-59dddc7c14cc\" (UID: \"bc147090-461f-4896-a08e-59dddc7c14cc\") " Dec 13 07:03:18 crc kubenswrapper[5048]: I1213 07:03:18.303889 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8bkhc\" (UniqueName: \"kubernetes.io/projected/bc147090-461f-4896-a08e-59dddc7c14cc-kube-api-access-8bkhc\") pod \"bc147090-461f-4896-a08e-59dddc7c14cc\" (UID: \"bc147090-461f-4896-a08e-59dddc7c14cc\") " Dec 13 07:03:18 crc kubenswrapper[5048]: I1213 07:03:18.303961 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bc147090-461f-4896-a08e-59dddc7c14cc-inventory\") pod \"bc147090-461f-4896-a08e-59dddc7c14cc\" (UID: \"bc147090-461f-4896-a08e-59dddc7c14cc\") " Dec 13 07:03:18 crc kubenswrapper[5048]: I1213 07:03:18.311691 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc147090-461f-4896-a08e-59dddc7c14cc-kube-api-access-8bkhc" (OuterVolumeSpecName: "kube-api-access-8bkhc") pod "bc147090-461f-4896-a08e-59dddc7c14cc" (UID: "bc147090-461f-4896-a08e-59dddc7c14cc"). InnerVolumeSpecName "kube-api-access-8bkhc". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 07:03:18 crc kubenswrapper[5048]: I1213 07:03:18.337128 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc147090-461f-4896-a08e-59dddc7c14cc-inventory" (OuterVolumeSpecName: "inventory") pod "bc147090-461f-4896-a08e-59dddc7c14cc" (UID: "bc147090-461f-4896-a08e-59dddc7c14cc"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:03:18 crc kubenswrapper[5048]: I1213 07:03:18.342103 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc147090-461f-4896-a08e-59dddc7c14cc-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "bc147090-461f-4896-a08e-59dddc7c14cc" (UID: "bc147090-461f-4896-a08e-59dddc7c14cc"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:03:18 crc kubenswrapper[5048]: I1213 07:03:18.405965 5048 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bc147090-461f-4896-a08e-59dddc7c14cc-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 13 07:03:18 crc kubenswrapper[5048]: I1213 07:03:18.406000 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8bkhc\" (UniqueName: \"kubernetes.io/projected/bc147090-461f-4896-a08e-59dddc7c14cc-kube-api-access-8bkhc\") on node \"crc\" DevicePath \"\"" Dec 13 07:03:18 crc kubenswrapper[5048]: I1213 07:03:18.406016 5048 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bc147090-461f-4896-a08e-59dddc7c14cc-inventory\") on node \"crc\" DevicePath \"\"" Dec 13 07:03:18 crc kubenswrapper[5048]: I1213 07:03:18.918926 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-42g5n" event={"ID":"bc147090-461f-4896-a08e-59dddc7c14cc","Type":"ContainerDied","Data":"e1ee1948a4fa43a744ac11f700ce0b49fa178e6159147388171c8dee7eb7f4c3"} Dec 13 07:03:18 crc kubenswrapper[5048]: I1213 07:03:18.918969 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e1ee1948a4fa43a744ac11f700ce0b49fa178e6159147388171c8dee7eb7f4c3" Dec 13 07:03:18 crc kubenswrapper[5048]: I1213 07:03:18.919020 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-42g5n" Dec 13 07:03:18 crc kubenswrapper[5048]: I1213 07:03:18.963936 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mpgnb"] Dec 13 07:03:18 crc kubenswrapper[5048]: E1213 07:03:18.964478 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc147090-461f-4896-a08e-59dddc7c14cc" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Dec 13 07:03:18 crc kubenswrapper[5048]: I1213 07:03:18.964502 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc147090-461f-4896-a08e-59dddc7c14cc" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Dec 13 07:03:18 crc kubenswrapper[5048]: I1213 07:03:18.964771 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc147090-461f-4896-a08e-59dddc7c14cc" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Dec 13 07:03:18 crc kubenswrapper[5048]: I1213 07:03:18.966155 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mpgnb" Dec 13 07:03:18 crc kubenswrapper[5048]: I1213 07:03:18.968714 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 13 07:03:18 crc kubenswrapper[5048]: I1213 07:03:18.968986 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 13 07:03:18 crc kubenswrapper[5048]: I1213 07:03:18.969111 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 13 07:03:18 crc kubenswrapper[5048]: I1213 07:03:18.969514 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hgp7p" Dec 13 07:03:18 crc kubenswrapper[5048]: I1213 07:03:18.974087 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mpgnb"] Dec 13 07:03:19 crc kubenswrapper[5048]: I1213 07:03:19.014764 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/360c2d75-bc2a-408a-bfa8-4c250e32d6ab-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-mpgnb\" (UID: \"360c2d75-bc2a-408a-bfa8-4c250e32d6ab\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mpgnb" Dec 13 07:03:19 crc kubenswrapper[5048]: I1213 07:03:19.014838 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/360c2d75-bc2a-408a-bfa8-4c250e32d6ab-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-mpgnb\" (UID: \"360c2d75-bc2a-408a-bfa8-4c250e32d6ab\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mpgnb" Dec 13 07:03:19 crc kubenswrapper[5048]: I1213 07:03:19.014890 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wzq2h\" (UniqueName: \"kubernetes.io/projected/360c2d75-bc2a-408a-bfa8-4c250e32d6ab-kube-api-access-wzq2h\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-mpgnb\" (UID: \"360c2d75-bc2a-408a-bfa8-4c250e32d6ab\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mpgnb" Dec 13 07:03:19 crc kubenswrapper[5048]: I1213 07:03:19.117039 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/360c2d75-bc2a-408a-bfa8-4c250e32d6ab-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-mpgnb\" (UID: \"360c2d75-bc2a-408a-bfa8-4c250e32d6ab\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mpgnb" Dec 13 07:03:19 crc kubenswrapper[5048]: I1213 07:03:19.117467 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/360c2d75-bc2a-408a-bfa8-4c250e32d6ab-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-mpgnb\" (UID: \"360c2d75-bc2a-408a-bfa8-4c250e32d6ab\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mpgnb" Dec 13 07:03:19 crc kubenswrapper[5048]: I1213 07:03:19.117565 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wzq2h\" (UniqueName: \"kubernetes.io/projected/360c2d75-bc2a-408a-bfa8-4c250e32d6ab-kube-api-access-wzq2h\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-mpgnb\" (UID: 
\"360c2d75-bc2a-408a-bfa8-4c250e32d6ab\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mpgnb" Dec 13 07:03:19 crc kubenswrapper[5048]: I1213 07:03:19.122904 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/360c2d75-bc2a-408a-bfa8-4c250e32d6ab-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-mpgnb\" (UID: \"360c2d75-bc2a-408a-bfa8-4c250e32d6ab\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mpgnb" Dec 13 07:03:19 crc kubenswrapper[5048]: I1213 07:03:19.132984 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/360c2d75-bc2a-408a-bfa8-4c250e32d6ab-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-mpgnb\" (UID: \"360c2d75-bc2a-408a-bfa8-4c250e32d6ab\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mpgnb" Dec 13 07:03:19 crc kubenswrapper[5048]: I1213 07:03:19.137574 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wzq2h\" (UniqueName: \"kubernetes.io/projected/360c2d75-bc2a-408a-bfa8-4c250e32d6ab-kube-api-access-wzq2h\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-mpgnb\" (UID: \"360c2d75-bc2a-408a-bfa8-4c250e32d6ab\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mpgnb" Dec 13 07:03:19 crc kubenswrapper[5048]: I1213 07:03:19.282855 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mpgnb" Dec 13 07:03:19 crc kubenswrapper[5048]: I1213 07:03:19.817534 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mpgnb"] Dec 13 07:03:19 crc kubenswrapper[5048]: I1213 07:03:19.819375 5048 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 13 07:03:19 crc kubenswrapper[5048]: I1213 07:03:19.930466 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mpgnb" event={"ID":"360c2d75-bc2a-408a-bfa8-4c250e32d6ab","Type":"ContainerStarted","Data":"a8f419854848850d9fe49f7e4f1a9138b15e865d571aa37fd754b3b386df5807"} Dec 13 07:03:20 crc kubenswrapper[5048]: I1213 07:03:20.940718 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mpgnb" event={"ID":"360c2d75-bc2a-408a-bfa8-4c250e32d6ab","Type":"ContainerStarted","Data":"5de10da223353cb6934e7f9a7059791bebddb9300112a5cd888237afc71d578b"} Dec 13 07:03:20 crc kubenswrapper[5048]: I1213 07:03:20.960048 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mpgnb" podStartSLOduration=2.477415374 podStartE2EDuration="2.960028406s" podCreationTimestamp="2025-12-13 07:03:18 +0000 UTC" firstStartedPulling="2025-12-13 07:03:19.819144991 +0000 UTC m=+2033.685739572" lastFinishedPulling="2025-12-13 07:03:20.301758013 +0000 UTC m=+2034.168352604" observedRunningTime="2025-12-13 07:03:20.955159874 +0000 UTC m=+2034.821754455" watchObservedRunningTime="2025-12-13 07:03:20.960028406 +0000 UTC m=+2034.826623007" Dec 13 07:03:30 crc kubenswrapper[5048]: I1213 07:03:30.026307 5048 generic.go:334] "Generic (PLEG): container finished" podID="360c2d75-bc2a-408a-bfa8-4c250e32d6ab" containerID="5de10da223353cb6934e7f9a7059791bebddb9300112a5cd888237afc71d578b" exitCode=0 Dec 13 07:03:30 crc 
kubenswrapper[5048]: I1213 07:03:30.026433 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mpgnb" event={"ID":"360c2d75-bc2a-408a-bfa8-4c250e32d6ab","Type":"ContainerDied","Data":"5de10da223353cb6934e7f9a7059791bebddb9300112a5cd888237afc71d578b"} Dec 13 07:03:31 crc kubenswrapper[5048]: I1213 07:03:31.461698 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mpgnb" Dec 13 07:03:31 crc kubenswrapper[5048]: I1213 07:03:31.663585 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/360c2d75-bc2a-408a-bfa8-4c250e32d6ab-inventory\") pod \"360c2d75-bc2a-408a-bfa8-4c250e32d6ab\" (UID: \"360c2d75-bc2a-408a-bfa8-4c250e32d6ab\") " Dec 13 07:03:31 crc kubenswrapper[5048]: I1213 07:03:31.663669 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/360c2d75-bc2a-408a-bfa8-4c250e32d6ab-ssh-key\") pod \"360c2d75-bc2a-408a-bfa8-4c250e32d6ab\" (UID: \"360c2d75-bc2a-408a-bfa8-4c250e32d6ab\") " Dec 13 07:03:31 crc kubenswrapper[5048]: I1213 07:03:31.664552 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wzq2h\" (UniqueName: \"kubernetes.io/projected/360c2d75-bc2a-408a-bfa8-4c250e32d6ab-kube-api-access-wzq2h\") pod \"360c2d75-bc2a-408a-bfa8-4c250e32d6ab\" (UID: \"360c2d75-bc2a-408a-bfa8-4c250e32d6ab\") " Dec 13 07:03:31 crc kubenswrapper[5048]: I1213 07:03:31.672240 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/360c2d75-bc2a-408a-bfa8-4c250e32d6ab-kube-api-access-wzq2h" (OuterVolumeSpecName: "kube-api-access-wzq2h") pod "360c2d75-bc2a-408a-bfa8-4c250e32d6ab" (UID: "360c2d75-bc2a-408a-bfa8-4c250e32d6ab"). InnerVolumeSpecName "kube-api-access-wzq2h". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 07:03:31 crc kubenswrapper[5048]: I1213 07:03:31.699585 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/360c2d75-bc2a-408a-bfa8-4c250e32d6ab-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "360c2d75-bc2a-408a-bfa8-4c250e32d6ab" (UID: "360c2d75-bc2a-408a-bfa8-4c250e32d6ab"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:03:31 crc kubenswrapper[5048]: I1213 07:03:31.722335 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/360c2d75-bc2a-408a-bfa8-4c250e32d6ab-inventory" (OuterVolumeSpecName: "inventory") pod "360c2d75-bc2a-408a-bfa8-4c250e32d6ab" (UID: "360c2d75-bc2a-408a-bfa8-4c250e32d6ab"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:03:31 crc kubenswrapper[5048]: I1213 07:03:31.783734 5048 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/360c2d75-bc2a-408a-bfa8-4c250e32d6ab-inventory\") on node \"crc\" DevicePath \"\"" Dec 13 07:03:31 crc kubenswrapper[5048]: I1213 07:03:31.783867 5048 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/360c2d75-bc2a-408a-bfa8-4c250e32d6ab-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 13 07:03:31 crc kubenswrapper[5048]: I1213 07:03:31.783919 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wzq2h\" (UniqueName: \"kubernetes.io/projected/360c2d75-bc2a-408a-bfa8-4c250e32d6ab-kube-api-access-wzq2h\") on node \"crc\" DevicePath \"\"" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.087259 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mpgnb" event={"ID":"360c2d75-bc2a-408a-bfa8-4c250e32d6ab","Type":"ContainerDied","Data":"a8f419854848850d9fe49f7e4f1a9138b15e865d571aa37fd754b3b386df5807"} Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.087803 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a8f419854848850d9fe49f7e4f1a9138b15e865d571aa37fd754b3b386df5807" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.087379 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mpgnb" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.140525 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz"] Dec 13 07:03:32 crc kubenswrapper[5048]: E1213 07:03:32.141125 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="360c2d75-bc2a-408a-bfa8-4c250e32d6ab" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.141157 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="360c2d75-bc2a-408a-bfa8-4c250e32d6ab" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.141424 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="360c2d75-bc2a-408a-bfa8-4c250e32d6ab" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.142929 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.146542 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.146544 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.146756 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.147011 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hgp7p" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.147223 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.147252 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.147296 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.147790 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.151859 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz"] Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.294100 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-psm45\" (UniqueName: \"kubernetes.io/projected/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-kube-api-access-psm45\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.294173 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.294927 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.295014 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-openstack-edpm-ipam-ovn-default-certs-0\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.295060 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.295286 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.295398 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.295480 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.295559 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.295622 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.295714 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-libvirt-combined-ca-bundle\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.295765 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.295817 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.295864 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.397646 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.397738 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.397782 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.397835 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.397884 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.397949 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.397992 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.398027 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.398063 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.398150 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-psm45\" (UniqueName: \"kubernetes.io/projected/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-kube-api-access-psm45\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.398198 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.398238 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.398281 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.398314 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.403747 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.404135 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.406560 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.406678 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.407025 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.407207 5048 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.407637 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.407675 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.408026 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.408762 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.409998 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.410164 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.416190 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-libvirt-combined-ca-bundle\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.420201 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-psm45\" (UniqueName: \"kubernetes.io/projected/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-kube-api-access-psm45\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:32 crc kubenswrapper[5048]: I1213 07:03:32.471982 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:03:33 crc kubenswrapper[5048]: I1213 07:03:33.098061 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz"] Dec 13 07:03:33 crc kubenswrapper[5048]: W1213 07:03:33.101599 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda6c582a0_39fc_4d6c_aa84_367f11c30ff1.slice/crio-4a5fd4c606a573f9944c1d224c50a14c6bb77e814eb8506aa77e7c43cfbd1ab3 WatchSource:0}: Error finding container 4a5fd4c606a573f9944c1d224c50a14c6bb77e814eb8506aa77e7c43cfbd1ab3: Status 404 returned error can't find the container with id 4a5fd4c606a573f9944c1d224c50a14c6bb77e814eb8506aa77e7c43cfbd1ab3 Dec 13 07:03:34 crc kubenswrapper[5048]: I1213 07:03:34.107905 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" event={"ID":"a6c582a0-39fc-4d6c-aa84-367f11c30ff1","Type":"ContainerStarted","Data":"15b6e4b952155f41db463bd09815f51047d5165f5278e47f883d557638f43d83"} Dec 13 07:03:34 crc kubenswrapper[5048]: I1213 07:03:34.109550 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" event={"ID":"a6c582a0-39fc-4d6c-aa84-367f11c30ff1","Type":"ContainerStarted","Data":"4a5fd4c606a573f9944c1d224c50a14c6bb77e814eb8506aa77e7c43cfbd1ab3"} Dec 13 07:03:34 crc kubenswrapper[5048]: I1213 07:03:34.130505 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" podStartSLOduration=1.617361416 podStartE2EDuration="2.130483618s" podCreationTimestamp="2025-12-13 07:03:32 +0000 UTC" firstStartedPulling="2025-12-13 07:03:33.103702768 +0000 UTC m=+2046.970297349" lastFinishedPulling="2025-12-13 07:03:33.61682497 +0000 UTC m=+2047.483419551" observedRunningTime="2025-12-13 07:03:34.124043645 +0000 UTC m=+2047.990638276" watchObservedRunningTime="2025-12-13 07:03:34.130483618 +0000 UTC m=+2047.997078199" Dec 13 07:03:46 crc kubenswrapper[5048]: I1213 07:03:46.216511 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 13 07:03:46 crc kubenswrapper[5048]: I1213 07:03:46.217382 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" 
probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 13 07:04:10 crc kubenswrapper[5048]: I1213 07:04:10.444972 5048 generic.go:334] "Generic (PLEG): container finished" podID="a6c582a0-39fc-4d6c-aa84-367f11c30ff1" containerID="15b6e4b952155f41db463bd09815f51047d5165f5278e47f883d557638f43d83" exitCode=0 Dec 13 07:04:10 crc kubenswrapper[5048]: I1213 07:04:10.445076 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" event={"ID":"a6c582a0-39fc-4d6c-aa84-367f11c30ff1","Type":"ContainerDied","Data":"15b6e4b952155f41db463bd09815f51047d5165f5278e47f883d557638f43d83"} Dec 13 07:04:11 crc kubenswrapper[5048]: I1213 07:04:11.860853 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:04:11 crc kubenswrapper[5048]: I1213 07:04:11.986182 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-ovn-combined-ca-bundle\") pod \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " Dec 13 07:04:11 crc kubenswrapper[5048]: I1213 07:04:11.986255 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-telemetry-combined-ca-bundle\") pod \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " Dec 13 07:04:11 crc kubenswrapper[5048]: I1213 07:04:11.986292 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-libvirt-combined-ca-bundle\") pod \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " Dec 13 07:04:11 crc kubenswrapper[5048]: I1213 07:04:11.986317 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-nova-combined-ca-bundle\") pod \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " Dec 13 07:04:11 crc kubenswrapper[5048]: I1213 07:04:11.986355 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-inventory\") pod \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " Dec 13 07:04:11 crc kubenswrapper[5048]: I1213 07:04:11.986464 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-ssh-key\") pod \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " Dec 13 07:04:11 crc kubenswrapper[5048]: I1213 07:04:11.986501 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " Dec 13 07:04:11 crc kubenswrapper[5048]: 
I1213 07:04:11.986537 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-bootstrap-combined-ca-bundle\") pod \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " Dec 13 07:04:11 crc kubenswrapper[5048]: I1213 07:04:11.986596 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-repo-setup-combined-ca-bundle\") pod \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " Dec 13 07:04:11 crc kubenswrapper[5048]: I1213 07:04:11.986631 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " Dec 13 07:04:11 crc kubenswrapper[5048]: I1213 07:04:11.986696 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-neutron-metadata-combined-ca-bundle\") pod \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " Dec 13 07:04:11 crc kubenswrapper[5048]: I1213 07:04:11.986743 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-psm45\" (UniqueName: \"kubernetes.io/projected/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-kube-api-access-psm45\") pod \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " Dec 13 07:04:11 crc kubenswrapper[5048]: I1213 07:04:11.986796 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " Dec 13 07:04:11 crc kubenswrapper[5048]: I1213 07:04:11.986831 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-openstack-edpm-ipam-ovn-default-certs-0\") pod \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\" (UID: \"a6c582a0-39fc-4d6c-aa84-367f11c30ff1\") " Dec 13 07:04:11 crc kubenswrapper[5048]: I1213 07:04:11.994325 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-kube-api-access-psm45" (OuterVolumeSpecName: "kube-api-access-psm45") pod "a6c582a0-39fc-4d6c-aa84-367f11c30ff1" (UID: "a6c582a0-39fc-4d6c-aa84-367f11c30ff1"). InnerVolumeSpecName "kube-api-access-psm45". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 07:04:11 crc kubenswrapper[5048]: I1213 07:04:11.994793 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "a6c582a0-39fc-4d6c-aa84-367f11c30ff1" (UID: "a6c582a0-39fc-4d6c-aa84-367f11c30ff1"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:04:11 crc kubenswrapper[5048]: I1213 07:04:11.995654 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "a6c582a0-39fc-4d6c-aa84-367f11c30ff1" (UID: "a6c582a0-39fc-4d6c-aa84-367f11c30ff1"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:04:11 crc kubenswrapper[5048]: I1213 07:04:11.995767 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "a6c582a0-39fc-4d6c-aa84-367f11c30ff1" (UID: "a6c582a0-39fc-4d6c-aa84-367f11c30ff1"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 07:04:11 crc kubenswrapper[5048]: I1213 07:04:11.996690 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "a6c582a0-39fc-4d6c-aa84-367f11c30ff1" (UID: "a6c582a0-39fc-4d6c-aa84-367f11c30ff1"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:04:11 crc kubenswrapper[5048]: I1213 07:04:11.996738 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "a6c582a0-39fc-4d6c-aa84-367f11c30ff1" (UID: "a6c582a0-39fc-4d6c-aa84-367f11c30ff1"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 07:04:11 crc kubenswrapper[5048]: I1213 07:04:11.997277 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "a6c582a0-39fc-4d6c-aa84-367f11c30ff1" (UID: "a6c582a0-39fc-4d6c-aa84-367f11c30ff1"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:04:11 crc kubenswrapper[5048]: I1213 07:04:11.997334 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "a6c582a0-39fc-4d6c-aa84-367f11c30ff1" (UID: "a6c582a0-39fc-4d6c-aa84-367f11c30ff1"). InnerVolumeSpecName "telemetry-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:04:11 crc kubenswrapper[5048]: I1213 07:04:11.997612 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "a6c582a0-39fc-4d6c-aa84-367f11c30ff1" (UID: "a6c582a0-39fc-4d6c-aa84-367f11c30ff1"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:04:11 crc kubenswrapper[5048]: I1213 07:04:11.998255 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "a6c582a0-39fc-4d6c-aa84-367f11c30ff1" (UID: "a6c582a0-39fc-4d6c-aa84-367f11c30ff1"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 07:04:11 crc kubenswrapper[5048]: I1213 07:04:11.998734 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "a6c582a0-39fc-4d6c-aa84-367f11c30ff1" (UID: "a6c582a0-39fc-4d6c-aa84-367f11c30ff1"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.001181 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "a6c582a0-39fc-4d6c-aa84-367f11c30ff1" (UID: "a6c582a0-39fc-4d6c-aa84-367f11c30ff1"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.025803 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-inventory" (OuterVolumeSpecName: "inventory") pod "a6c582a0-39fc-4d6c-aa84-367f11c30ff1" (UID: "a6c582a0-39fc-4d6c-aa84-367f11c30ff1"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.028191 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a6c582a0-39fc-4d6c-aa84-367f11c30ff1" (UID: "a6c582a0-39fc-4d6c-aa84-367f11c30ff1"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.098496 5048 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.098545 5048 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.098564 5048 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.098576 5048 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.098588 5048 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.098602 5048 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.098613 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-psm45\" (UniqueName: \"kubernetes.io/projected/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-kube-api-access-psm45\") on node \"crc\" DevicePath \"\"" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.098624 5048 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.098634 5048 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.098647 5048 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.098657 5048 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.098669 5048 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.098681 5048 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.098691 5048 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a6c582a0-39fc-4d6c-aa84-367f11c30ff1-inventory\") on node \"crc\" DevicePath \"\"" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.467427 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" event={"ID":"a6c582a0-39fc-4d6c-aa84-367f11c30ff1","Type":"ContainerDied","Data":"4a5fd4c606a573f9944c1d224c50a14c6bb77e814eb8506aa77e7c43cfbd1ab3"} Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.467526 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4a5fd4c606a573f9944c1d224c50a14c6bb77e814eb8506aa77e7c43cfbd1ab3" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.467630 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.579711 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-s7hbt"] Dec 13 07:04:12 crc kubenswrapper[5048]: E1213 07:04:12.580090 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6c582a0-39fc-4d6c-aa84-367f11c30ff1" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.580105 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6c582a0-39fc-4d6c-aa84-367f11c30ff1" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.580264 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6c582a0-39fc-4d6c-aa84-367f11c30ff1" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.580999 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-s7hbt" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.586976 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.587160 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.587641 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.587788 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.588310 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hgp7p" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.594250 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-s7hbt"] Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.709174 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0376236-384b-44b9-abbb-a1fe41557a88-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-s7hbt\" (UID: \"c0376236-384b-44b9-abbb-a1fe41557a88\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-s7hbt" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.709357 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c0376236-384b-44b9-abbb-a1fe41557a88-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-s7hbt\" (UID: \"c0376236-384b-44b9-abbb-a1fe41557a88\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-s7hbt" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.709411 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c0376236-384b-44b9-abbb-a1fe41557a88-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-s7hbt\" (UID: \"c0376236-384b-44b9-abbb-a1fe41557a88\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-s7hbt" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.709620 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/c0376236-384b-44b9-abbb-a1fe41557a88-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-s7hbt\" (UID: \"c0376236-384b-44b9-abbb-a1fe41557a88\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-s7hbt" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.709717 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jkhgz\" (UniqueName: \"kubernetes.io/projected/c0376236-384b-44b9-abbb-a1fe41557a88-kube-api-access-jkhgz\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-s7hbt\" (UID: \"c0376236-384b-44b9-abbb-a1fe41557a88\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-s7hbt" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.810946 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/c0376236-384b-44b9-abbb-a1fe41557a88-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-s7hbt\" (UID: \"c0376236-384b-44b9-abbb-a1fe41557a88\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-s7hbt" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.810987 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c0376236-384b-44b9-abbb-a1fe41557a88-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-s7hbt\" (UID: \"c0376236-384b-44b9-abbb-a1fe41557a88\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-s7hbt" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.811050 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/c0376236-384b-44b9-abbb-a1fe41557a88-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-s7hbt\" (UID: \"c0376236-384b-44b9-abbb-a1fe41557a88\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-s7hbt" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.811090 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jkhgz\" (UniqueName: \"kubernetes.io/projected/c0376236-384b-44b9-abbb-a1fe41557a88-kube-api-access-jkhgz\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-s7hbt\" (UID: \"c0376236-384b-44b9-abbb-a1fe41557a88\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-s7hbt" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.811152 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0376236-384b-44b9-abbb-a1fe41557a88-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-s7hbt\" (UID: \"c0376236-384b-44b9-abbb-a1fe41557a88\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-s7hbt" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.812423 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/c0376236-384b-44b9-abbb-a1fe41557a88-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-s7hbt\" (UID: \"c0376236-384b-44b9-abbb-a1fe41557a88\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-s7hbt" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.816698 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0376236-384b-44b9-abbb-a1fe41557a88-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-s7hbt\" (UID: \"c0376236-384b-44b9-abbb-a1fe41557a88\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-s7hbt" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.816716 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c0376236-384b-44b9-abbb-a1fe41557a88-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-s7hbt\" (UID: \"c0376236-384b-44b9-abbb-a1fe41557a88\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-s7hbt" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.817663 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c0376236-384b-44b9-abbb-a1fe41557a88-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-s7hbt\" (UID: \"c0376236-384b-44b9-abbb-a1fe41557a88\") " 
pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-s7hbt" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.829172 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jkhgz\" (UniqueName: \"kubernetes.io/projected/c0376236-384b-44b9-abbb-a1fe41557a88-kube-api-access-jkhgz\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-s7hbt\" (UID: \"c0376236-384b-44b9-abbb-a1fe41557a88\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-s7hbt" Dec 13 07:04:12 crc kubenswrapper[5048]: I1213 07:04:12.902795 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-s7hbt" Dec 13 07:04:13 crc kubenswrapper[5048]: I1213 07:04:13.423705 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-s7hbt"] Dec 13 07:04:13 crc kubenswrapper[5048]: I1213 07:04:13.478456 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-s7hbt" event={"ID":"c0376236-384b-44b9-abbb-a1fe41557a88","Type":"ContainerStarted","Data":"f607e706af5b0c03154b7659d57d7f8c1a9ac8b7e941c78b8376b6fa274189eb"} Dec 13 07:04:14 crc kubenswrapper[5048]: I1213 07:04:14.502195 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-s7hbt" event={"ID":"c0376236-384b-44b9-abbb-a1fe41557a88","Type":"ContainerStarted","Data":"915b90d288c45d1dcb77a638f7c38009164a675d26aac12865fd29c1f63b25d5"} Dec 13 07:04:14 crc kubenswrapper[5048]: I1213 07:04:14.538494 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-s7hbt" podStartSLOduration=1.938773797 podStartE2EDuration="2.538456142s" podCreationTimestamp="2025-12-13 07:04:12 +0000 UTC" firstStartedPulling="2025-12-13 07:04:13.432141839 +0000 UTC m=+2087.298736420" lastFinishedPulling="2025-12-13 07:04:14.031824174 +0000 UTC m=+2087.898418765" observedRunningTime="2025-12-13 07:04:14.531072733 +0000 UTC m=+2088.397667334" watchObservedRunningTime="2025-12-13 07:04:14.538456142 +0000 UTC m=+2088.405050723" Dec 13 07:04:16 crc kubenswrapper[5048]: I1213 07:04:16.216018 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 13 07:04:16 crc kubenswrapper[5048]: I1213 07:04:16.216106 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 13 07:04:46 crc kubenswrapper[5048]: I1213 07:04:46.215813 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 13 07:04:46 crc kubenswrapper[5048]: I1213 07:04:46.216467 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" 
probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 13 07:04:46 crc kubenswrapper[5048]: I1213 07:04:46.216506 5048 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" Dec 13 07:04:46 crc kubenswrapper[5048]: I1213 07:04:46.216978 5048 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"53e69e2487d25cf71ad6303383d3f610cb5fd038d34d4d4b9e63796a8c974cf5"} pod="openshift-machine-config-operator/machine-config-daemon-j7hns" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 13 07:04:46 crc kubenswrapper[5048]: I1213 07:04:46.217022 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" containerID="cri-o://53e69e2487d25cf71ad6303383d3f610cb5fd038d34d4d4b9e63796a8c974cf5" gracePeriod=600 Dec 13 07:04:46 crc kubenswrapper[5048]: I1213 07:04:46.795717 5048 generic.go:334] "Generic (PLEG): container finished" podID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerID="53e69e2487d25cf71ad6303383d3f610cb5fd038d34d4d4b9e63796a8c974cf5" exitCode=0 Dec 13 07:04:46 crc kubenswrapper[5048]: I1213 07:04:46.795783 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" event={"ID":"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b","Type":"ContainerDied","Data":"53e69e2487d25cf71ad6303383d3f610cb5fd038d34d4d4b9e63796a8c974cf5"} Dec 13 07:04:46 crc kubenswrapper[5048]: I1213 07:04:46.796151 5048 scope.go:117] "RemoveContainer" containerID="c3c3c010eb86b49ee1ff9e8da0dddc32f266e163f2ef609d402b38792e3c4862" Dec 13 07:04:47 crc kubenswrapper[5048]: I1213 07:04:47.807995 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" event={"ID":"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b","Type":"ContainerStarted","Data":"13a24424a3c51e9ddc7d4af672496f5caa198f8ddf1e75b5f564a337c4d739d6"} Dec 13 07:05:15 crc kubenswrapper[5048]: I1213 07:05:15.051508 5048 generic.go:334] "Generic (PLEG): container finished" podID="c0376236-384b-44b9-abbb-a1fe41557a88" containerID="915b90d288c45d1dcb77a638f7c38009164a675d26aac12865fd29c1f63b25d5" exitCode=0 Dec 13 07:05:15 crc kubenswrapper[5048]: I1213 07:05:15.051587 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-s7hbt" event={"ID":"c0376236-384b-44b9-abbb-a1fe41557a88","Type":"ContainerDied","Data":"915b90d288c45d1dcb77a638f7c38009164a675d26aac12865fd29c1f63b25d5"} Dec 13 07:05:16 crc kubenswrapper[5048]: I1213 07:05:16.558087 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-s7hbt" Dec 13 07:05:16 crc kubenswrapper[5048]: I1213 07:05:16.739300 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c0376236-384b-44b9-abbb-a1fe41557a88-ssh-key\") pod \"c0376236-384b-44b9-abbb-a1fe41557a88\" (UID: \"c0376236-384b-44b9-abbb-a1fe41557a88\") " Dec 13 07:05:16 crc kubenswrapper[5048]: I1213 07:05:16.739514 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/c0376236-384b-44b9-abbb-a1fe41557a88-ovncontroller-config-0\") pod \"c0376236-384b-44b9-abbb-a1fe41557a88\" (UID: \"c0376236-384b-44b9-abbb-a1fe41557a88\") " Dec 13 07:05:16 crc kubenswrapper[5048]: I1213 07:05:16.739559 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0376236-384b-44b9-abbb-a1fe41557a88-ovn-combined-ca-bundle\") pod \"c0376236-384b-44b9-abbb-a1fe41557a88\" (UID: \"c0376236-384b-44b9-abbb-a1fe41557a88\") " Dec 13 07:05:16 crc kubenswrapper[5048]: I1213 07:05:16.739623 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c0376236-384b-44b9-abbb-a1fe41557a88-inventory\") pod \"c0376236-384b-44b9-abbb-a1fe41557a88\" (UID: \"c0376236-384b-44b9-abbb-a1fe41557a88\") " Dec 13 07:05:16 crc kubenswrapper[5048]: I1213 07:05:16.739686 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkhgz\" (UniqueName: \"kubernetes.io/projected/c0376236-384b-44b9-abbb-a1fe41557a88-kube-api-access-jkhgz\") pod \"c0376236-384b-44b9-abbb-a1fe41557a88\" (UID: \"c0376236-384b-44b9-abbb-a1fe41557a88\") " Dec 13 07:05:16 crc kubenswrapper[5048]: I1213 07:05:16.746637 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0376236-384b-44b9-abbb-a1fe41557a88-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "c0376236-384b-44b9-abbb-a1fe41557a88" (UID: "c0376236-384b-44b9-abbb-a1fe41557a88"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:05:16 crc kubenswrapper[5048]: I1213 07:05:16.748014 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c0376236-384b-44b9-abbb-a1fe41557a88-kube-api-access-jkhgz" (OuterVolumeSpecName: "kube-api-access-jkhgz") pod "c0376236-384b-44b9-abbb-a1fe41557a88" (UID: "c0376236-384b-44b9-abbb-a1fe41557a88"). InnerVolumeSpecName "kube-api-access-jkhgz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 07:05:16 crc kubenswrapper[5048]: I1213 07:05:16.768629 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c0376236-384b-44b9-abbb-a1fe41557a88-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "c0376236-384b-44b9-abbb-a1fe41557a88" (UID: "c0376236-384b-44b9-abbb-a1fe41557a88"). InnerVolumeSpecName "ovncontroller-config-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 07:05:16 crc kubenswrapper[5048]: I1213 07:05:16.775039 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0376236-384b-44b9-abbb-a1fe41557a88-inventory" (OuterVolumeSpecName: "inventory") pod "c0376236-384b-44b9-abbb-a1fe41557a88" (UID: "c0376236-384b-44b9-abbb-a1fe41557a88"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:05:16 crc kubenswrapper[5048]: I1213 07:05:16.783966 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0376236-384b-44b9-abbb-a1fe41557a88-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "c0376236-384b-44b9-abbb-a1fe41557a88" (UID: "c0376236-384b-44b9-abbb-a1fe41557a88"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:05:16 crc kubenswrapper[5048]: I1213 07:05:16.841602 5048 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/c0376236-384b-44b9-abbb-a1fe41557a88-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Dec 13 07:05:16 crc kubenswrapper[5048]: I1213 07:05:16.841635 5048 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0376236-384b-44b9-abbb-a1fe41557a88-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 07:05:16 crc kubenswrapper[5048]: I1213 07:05:16.841645 5048 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c0376236-384b-44b9-abbb-a1fe41557a88-inventory\") on node \"crc\" DevicePath \"\"" Dec 13 07:05:16 crc kubenswrapper[5048]: I1213 07:05:16.841653 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkhgz\" (UniqueName: \"kubernetes.io/projected/c0376236-384b-44b9-abbb-a1fe41557a88-kube-api-access-jkhgz\") on node \"crc\" DevicePath \"\"" Dec 13 07:05:16 crc kubenswrapper[5048]: I1213 07:05:16.841661 5048 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c0376236-384b-44b9-abbb-a1fe41557a88-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 13 07:05:17 crc kubenswrapper[5048]: I1213 07:05:17.077955 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-s7hbt" event={"ID":"c0376236-384b-44b9-abbb-a1fe41557a88","Type":"ContainerDied","Data":"f607e706af5b0c03154b7659d57d7f8c1a9ac8b7e941c78b8376b6fa274189eb"} Dec 13 07:05:17 crc kubenswrapper[5048]: I1213 07:05:17.078000 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f607e706af5b0c03154b7659d57d7f8c1a9ac8b7e941c78b8376b6fa274189eb" Dec 13 07:05:17 crc kubenswrapper[5048]: I1213 07:05:17.078064 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-s7hbt" Dec 13 07:05:17 crc kubenswrapper[5048]: I1213 07:05:17.229161 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb"] Dec 13 07:05:17 crc kubenswrapper[5048]: E1213 07:05:17.229647 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0376236-384b-44b9-abbb-a1fe41557a88" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Dec 13 07:05:17 crc kubenswrapper[5048]: I1213 07:05:17.229670 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0376236-384b-44b9-abbb-a1fe41557a88" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Dec 13 07:05:17 crc kubenswrapper[5048]: I1213 07:05:17.229854 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0376236-384b-44b9-abbb-a1fe41557a88" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Dec 13 07:05:17 crc kubenswrapper[5048]: I1213 07:05:17.230525 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb" Dec 13 07:05:17 crc kubenswrapper[5048]: I1213 07:05:17.232641 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 13 07:05:17 crc kubenswrapper[5048]: I1213 07:05:17.232893 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 13 07:05:17 crc kubenswrapper[5048]: I1213 07:05:17.233063 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 13 07:05:17 crc kubenswrapper[5048]: I1213 07:05:17.233250 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Dec 13 07:05:17 crc kubenswrapper[5048]: I1213 07:05:17.235023 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Dec 13 07:05:17 crc kubenswrapper[5048]: I1213 07:05:17.235722 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hgp7p" Dec 13 07:05:17 crc kubenswrapper[5048]: I1213 07:05:17.241943 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb"] Dec 13 07:05:17 crc kubenswrapper[5048]: I1213 07:05:17.350692 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/967349d1-5d27-480d-8e31-2eaa33e3c7e0-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb\" (UID: \"967349d1-5d27-480d-8e31-2eaa33e3c7e0\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb" Dec 13 07:05:17 crc kubenswrapper[5048]: I1213 07:05:17.351022 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/967349d1-5d27-480d-8e31-2eaa33e3c7e0-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb\" (UID: \"967349d1-5d27-480d-8e31-2eaa33e3c7e0\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb" Dec 13 07:05:17 crc kubenswrapper[5048]: I1213 07:05:17.351076 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"kube-api-access-5nqgq\" (UniqueName: \"kubernetes.io/projected/967349d1-5d27-480d-8e31-2eaa33e3c7e0-kube-api-access-5nqgq\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb\" (UID: \"967349d1-5d27-480d-8e31-2eaa33e3c7e0\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb" Dec 13 07:05:17 crc kubenswrapper[5048]: I1213 07:05:17.351100 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/967349d1-5d27-480d-8e31-2eaa33e3c7e0-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb\" (UID: \"967349d1-5d27-480d-8e31-2eaa33e3c7e0\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb" Dec 13 07:05:17 crc kubenswrapper[5048]: I1213 07:05:17.351180 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/967349d1-5d27-480d-8e31-2eaa33e3c7e0-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb\" (UID: \"967349d1-5d27-480d-8e31-2eaa33e3c7e0\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb" Dec 13 07:05:17 crc kubenswrapper[5048]: I1213 07:05:17.351225 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/967349d1-5d27-480d-8e31-2eaa33e3c7e0-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb\" (UID: \"967349d1-5d27-480d-8e31-2eaa33e3c7e0\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb" Dec 13 07:05:17 crc kubenswrapper[5048]: I1213 07:05:17.452651 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/967349d1-5d27-480d-8e31-2eaa33e3c7e0-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb\" (UID: \"967349d1-5d27-480d-8e31-2eaa33e3c7e0\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb" Dec 13 07:05:17 crc kubenswrapper[5048]: I1213 07:05:17.452714 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/967349d1-5d27-480d-8e31-2eaa33e3c7e0-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb\" (UID: \"967349d1-5d27-480d-8e31-2eaa33e3c7e0\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb" Dec 13 07:05:17 crc kubenswrapper[5048]: I1213 07:05:17.452742 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5nqgq\" (UniqueName: \"kubernetes.io/projected/967349d1-5d27-480d-8e31-2eaa33e3c7e0-kube-api-access-5nqgq\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb\" (UID: \"967349d1-5d27-480d-8e31-2eaa33e3c7e0\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb" Dec 13 07:05:17 crc kubenswrapper[5048]: I1213 07:05:17.452760 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/967349d1-5d27-480d-8e31-2eaa33e3c7e0-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb\" (UID: 
\"967349d1-5d27-480d-8e31-2eaa33e3c7e0\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb" Dec 13 07:05:17 crc kubenswrapper[5048]: I1213 07:05:17.452815 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/967349d1-5d27-480d-8e31-2eaa33e3c7e0-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb\" (UID: \"967349d1-5d27-480d-8e31-2eaa33e3c7e0\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb" Dec 13 07:05:17 crc kubenswrapper[5048]: I1213 07:05:17.452842 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/967349d1-5d27-480d-8e31-2eaa33e3c7e0-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb\" (UID: \"967349d1-5d27-480d-8e31-2eaa33e3c7e0\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb" Dec 13 07:05:17 crc kubenswrapper[5048]: I1213 07:05:17.457423 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/967349d1-5d27-480d-8e31-2eaa33e3c7e0-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb\" (UID: \"967349d1-5d27-480d-8e31-2eaa33e3c7e0\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb" Dec 13 07:05:17 crc kubenswrapper[5048]: I1213 07:05:17.457667 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/967349d1-5d27-480d-8e31-2eaa33e3c7e0-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb\" (UID: \"967349d1-5d27-480d-8e31-2eaa33e3c7e0\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb" Dec 13 07:05:17 crc kubenswrapper[5048]: I1213 07:05:17.458124 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/967349d1-5d27-480d-8e31-2eaa33e3c7e0-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb\" (UID: \"967349d1-5d27-480d-8e31-2eaa33e3c7e0\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb" Dec 13 07:05:17 crc kubenswrapper[5048]: I1213 07:05:17.458159 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/967349d1-5d27-480d-8e31-2eaa33e3c7e0-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb\" (UID: \"967349d1-5d27-480d-8e31-2eaa33e3c7e0\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb" Dec 13 07:05:17 crc kubenswrapper[5048]: I1213 07:05:17.458262 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/967349d1-5d27-480d-8e31-2eaa33e3c7e0-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb\" (UID: \"967349d1-5d27-480d-8e31-2eaa33e3c7e0\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb" Dec 13 07:05:17 crc kubenswrapper[5048]: I1213 07:05:17.473416 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5nqgq\" (UniqueName: 
\"kubernetes.io/projected/967349d1-5d27-480d-8e31-2eaa33e3c7e0-kube-api-access-5nqgq\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb\" (UID: \"967349d1-5d27-480d-8e31-2eaa33e3c7e0\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb" Dec 13 07:05:17 crc kubenswrapper[5048]: I1213 07:05:17.550480 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb" Dec 13 07:05:18 crc kubenswrapper[5048]: I1213 07:05:18.121607 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb"] Dec 13 07:05:19 crc kubenswrapper[5048]: I1213 07:05:19.095306 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb" event={"ID":"967349d1-5d27-480d-8e31-2eaa33e3c7e0","Type":"ContainerStarted","Data":"d4780195b699857348bfd505ce22fc905d91ffb794b9704f6dfbaea68b00f277"} Dec 13 07:05:20 crc kubenswrapper[5048]: I1213 07:05:20.106197 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb" event={"ID":"967349d1-5d27-480d-8e31-2eaa33e3c7e0","Type":"ContainerStarted","Data":"155ed72a3354bd5a1ca3a8ecd0c0e990915e08c396ecbbcfdf345c3e40363c3e"} Dec 13 07:05:20 crc kubenswrapper[5048]: I1213 07:05:20.134256 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb" podStartSLOduration=2.167587448 podStartE2EDuration="3.134237888s" podCreationTimestamp="2025-12-13 07:05:17 +0000 UTC" firstStartedPulling="2025-12-13 07:05:18.128126565 +0000 UTC m=+2151.994721146" lastFinishedPulling="2025-12-13 07:05:19.094777005 +0000 UTC m=+2152.961371586" observedRunningTime="2025-12-13 07:05:20.127831185 +0000 UTC m=+2153.994425786" watchObservedRunningTime="2025-12-13 07:05:20.134237888 +0000 UTC m=+2154.000832469" Dec 13 07:06:09 crc kubenswrapper[5048]: I1213 07:06:09.523142 5048 generic.go:334] "Generic (PLEG): container finished" podID="967349d1-5d27-480d-8e31-2eaa33e3c7e0" containerID="155ed72a3354bd5a1ca3a8ecd0c0e990915e08c396ecbbcfdf345c3e40363c3e" exitCode=0 Dec 13 07:06:09 crc kubenswrapper[5048]: I1213 07:06:09.523258 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb" event={"ID":"967349d1-5d27-480d-8e31-2eaa33e3c7e0","Type":"ContainerDied","Data":"155ed72a3354bd5a1ca3a8ecd0c0e990915e08c396ecbbcfdf345c3e40363c3e"} Dec 13 07:06:10 crc kubenswrapper[5048]: I1213 07:06:10.968834 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.056701 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/967349d1-5d27-480d-8e31-2eaa33e3c7e0-neutron-ovn-metadata-agent-neutron-config-0\") pod \"967349d1-5d27-480d-8e31-2eaa33e3c7e0\" (UID: \"967349d1-5d27-480d-8e31-2eaa33e3c7e0\") " Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.056751 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5nqgq\" (UniqueName: \"kubernetes.io/projected/967349d1-5d27-480d-8e31-2eaa33e3c7e0-kube-api-access-5nqgq\") pod \"967349d1-5d27-480d-8e31-2eaa33e3c7e0\" (UID: \"967349d1-5d27-480d-8e31-2eaa33e3c7e0\") " Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.056796 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/967349d1-5d27-480d-8e31-2eaa33e3c7e0-ssh-key\") pod \"967349d1-5d27-480d-8e31-2eaa33e3c7e0\" (UID: \"967349d1-5d27-480d-8e31-2eaa33e3c7e0\") " Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.056845 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/967349d1-5d27-480d-8e31-2eaa33e3c7e0-neutron-metadata-combined-ca-bundle\") pod \"967349d1-5d27-480d-8e31-2eaa33e3c7e0\" (UID: \"967349d1-5d27-480d-8e31-2eaa33e3c7e0\") " Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.056921 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/967349d1-5d27-480d-8e31-2eaa33e3c7e0-inventory\") pod \"967349d1-5d27-480d-8e31-2eaa33e3c7e0\" (UID: \"967349d1-5d27-480d-8e31-2eaa33e3c7e0\") " Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.056941 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/967349d1-5d27-480d-8e31-2eaa33e3c7e0-nova-metadata-neutron-config-0\") pod \"967349d1-5d27-480d-8e31-2eaa33e3c7e0\" (UID: \"967349d1-5d27-480d-8e31-2eaa33e3c7e0\") " Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.063034 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/967349d1-5d27-480d-8e31-2eaa33e3c7e0-kube-api-access-5nqgq" (OuterVolumeSpecName: "kube-api-access-5nqgq") pod "967349d1-5d27-480d-8e31-2eaa33e3c7e0" (UID: "967349d1-5d27-480d-8e31-2eaa33e3c7e0"). InnerVolumeSpecName "kube-api-access-5nqgq". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.068535 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/967349d1-5d27-480d-8e31-2eaa33e3c7e0-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "967349d1-5d27-480d-8e31-2eaa33e3c7e0" (UID: "967349d1-5d27-480d-8e31-2eaa33e3c7e0"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.092312 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/967349d1-5d27-480d-8e31-2eaa33e3c7e0-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "967349d1-5d27-480d-8e31-2eaa33e3c7e0" (UID: "967349d1-5d27-480d-8e31-2eaa33e3c7e0"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.094104 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/967349d1-5d27-480d-8e31-2eaa33e3c7e0-inventory" (OuterVolumeSpecName: "inventory") pod "967349d1-5d27-480d-8e31-2eaa33e3c7e0" (UID: "967349d1-5d27-480d-8e31-2eaa33e3c7e0"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.096980 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/967349d1-5d27-480d-8e31-2eaa33e3c7e0-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "967349d1-5d27-480d-8e31-2eaa33e3c7e0" (UID: "967349d1-5d27-480d-8e31-2eaa33e3c7e0"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.097416 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/967349d1-5d27-480d-8e31-2eaa33e3c7e0-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "967349d1-5d27-480d-8e31-2eaa33e3c7e0" (UID: "967349d1-5d27-480d-8e31-2eaa33e3c7e0"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.158970 5048 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/967349d1-5d27-480d-8e31-2eaa33e3c7e0-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.159007 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5nqgq\" (UniqueName: \"kubernetes.io/projected/967349d1-5d27-480d-8e31-2eaa33e3c7e0-kube-api-access-5nqgq\") on node \"crc\" DevicePath \"\"" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.159023 5048 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/967349d1-5d27-480d-8e31-2eaa33e3c7e0-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.159041 5048 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/967349d1-5d27-480d-8e31-2eaa33e3c7e0-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.159052 5048 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/967349d1-5d27-480d-8e31-2eaa33e3c7e0-inventory\") on node \"crc\" DevicePath \"\"" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.159064 5048 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/967349d1-5d27-480d-8e31-2eaa33e3c7e0-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.540918 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb" event={"ID":"967349d1-5d27-480d-8e31-2eaa33e3c7e0","Type":"ContainerDied","Data":"d4780195b699857348bfd505ce22fc905d91ffb794b9704f6dfbaea68b00f277"} Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.540967 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d4780195b699857348bfd505ce22fc905d91ffb794b9704f6dfbaea68b00f277" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.541006 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.637191 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-47k9k"] Dec 13 07:06:11 crc kubenswrapper[5048]: E1213 07:06:11.637713 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="967349d1-5d27-480d-8e31-2eaa33e3c7e0" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.637740 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="967349d1-5d27-480d-8e31-2eaa33e3c7e0" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.637969 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="967349d1-5d27-480d-8e31-2eaa33e3c7e0" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.638815 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-47k9k" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.641153 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hgp7p" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.641414 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.641580 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.641730 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.641861 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.651472 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-47k9k"] Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.667306 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/482402ba-adeb-4175-911a-2ab863e44d4e-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-47k9k\" (UID: \"482402ba-adeb-4175-911a-2ab863e44d4e\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-47k9k" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.667420 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/482402ba-adeb-4175-911a-2ab863e44d4e-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-47k9k\" (UID: \"482402ba-adeb-4175-911a-2ab863e44d4e\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-47k9k" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.667479 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6dqmx\" (UniqueName: \"kubernetes.io/projected/482402ba-adeb-4175-911a-2ab863e44d4e-kube-api-access-6dqmx\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-47k9k\" (UID: \"482402ba-adeb-4175-911a-2ab863e44d4e\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-47k9k" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.667535 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/482402ba-adeb-4175-911a-2ab863e44d4e-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-47k9k\" (UID: \"482402ba-adeb-4175-911a-2ab863e44d4e\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-47k9k" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.667612 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/482402ba-adeb-4175-911a-2ab863e44d4e-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-47k9k\" (UID: \"482402ba-adeb-4175-911a-2ab863e44d4e\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-47k9k" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.769412 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/482402ba-adeb-4175-911a-2ab863e44d4e-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-47k9k\" (UID: \"482402ba-adeb-4175-911a-2ab863e44d4e\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-47k9k" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.769496 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6dqmx\" (UniqueName: \"kubernetes.io/projected/482402ba-adeb-4175-911a-2ab863e44d4e-kube-api-access-6dqmx\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-47k9k\" (UID: \"482402ba-adeb-4175-911a-2ab863e44d4e\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-47k9k" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.769548 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/482402ba-adeb-4175-911a-2ab863e44d4e-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-47k9k\" (UID: \"482402ba-adeb-4175-911a-2ab863e44d4e\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-47k9k" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.769606 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/482402ba-adeb-4175-911a-2ab863e44d4e-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-47k9k\" (UID: \"482402ba-adeb-4175-911a-2ab863e44d4e\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-47k9k" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.769696 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/482402ba-adeb-4175-911a-2ab863e44d4e-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-47k9k\" (UID: \"482402ba-adeb-4175-911a-2ab863e44d4e\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-47k9k" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.776026 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/482402ba-adeb-4175-911a-2ab863e44d4e-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-47k9k\" (UID: \"482402ba-adeb-4175-911a-2ab863e44d4e\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-47k9k" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.776100 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/482402ba-adeb-4175-911a-2ab863e44d4e-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-47k9k\" (UID: \"482402ba-adeb-4175-911a-2ab863e44d4e\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-47k9k" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.776334 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/482402ba-adeb-4175-911a-2ab863e44d4e-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-47k9k\" (UID: \"482402ba-adeb-4175-911a-2ab863e44d4e\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-47k9k" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.787066 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/482402ba-adeb-4175-911a-2ab863e44d4e-libvirt-secret-0\") pod 
\"libvirt-edpm-deployment-openstack-edpm-ipam-47k9k\" (UID: \"482402ba-adeb-4175-911a-2ab863e44d4e\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-47k9k" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.828883 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6dqmx\" (UniqueName: \"kubernetes.io/projected/482402ba-adeb-4175-911a-2ab863e44d4e-kube-api-access-6dqmx\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-47k9k\" (UID: \"482402ba-adeb-4175-911a-2ab863e44d4e\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-47k9k" Dec 13 07:06:11 crc kubenswrapper[5048]: I1213 07:06:11.956039 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-47k9k" Dec 13 07:06:12 crc kubenswrapper[5048]: I1213 07:06:12.446193 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-47k9k"] Dec 13 07:06:12 crc kubenswrapper[5048]: I1213 07:06:12.550451 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-47k9k" event={"ID":"482402ba-adeb-4175-911a-2ab863e44d4e","Type":"ContainerStarted","Data":"4e8d64f62348f60791f05567cb49942a25d14938c18ebfacc51a58b983f681a5"} Dec 13 07:06:13 crc kubenswrapper[5048]: I1213 07:06:13.560221 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-47k9k" event={"ID":"482402ba-adeb-4175-911a-2ab863e44d4e","Type":"ContainerStarted","Data":"12fe4ef85bd88027ced980cc2a19a1aec21804f45031346ceff0845904857d63"} Dec 13 07:06:13 crc kubenswrapper[5048]: I1213 07:06:13.586830 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-47k9k" podStartSLOduration=1.93194527 podStartE2EDuration="2.58680654s" podCreationTimestamp="2025-12-13 07:06:11 +0000 UTC" firstStartedPulling="2025-12-13 07:06:12.453619309 +0000 UTC m=+2206.320213890" lastFinishedPulling="2025-12-13 07:06:13.108480579 +0000 UTC m=+2206.975075160" observedRunningTime="2025-12-13 07:06:13.576775489 +0000 UTC m=+2207.443370090" watchObservedRunningTime="2025-12-13 07:06:13.58680654 +0000 UTC m=+2207.453401121" Dec 13 07:06:29 crc kubenswrapper[5048]: I1213 07:06:29.024558 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-bqbcv"] Dec 13 07:06:29 crc kubenswrapper[5048]: I1213 07:06:29.026908 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bqbcv" Dec 13 07:06:29 crc kubenswrapper[5048]: I1213 07:06:29.041990 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bqbcv"] Dec 13 07:06:29 crc kubenswrapper[5048]: I1213 07:06:29.054334 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7272cedc-9fcb-4482-be6b-df09c09f5541-catalog-content\") pod \"community-operators-bqbcv\" (UID: \"7272cedc-9fcb-4482-be6b-df09c09f5541\") " pod="openshift-marketplace/community-operators-bqbcv" Dec 13 07:06:29 crc kubenswrapper[5048]: I1213 07:06:29.054588 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ng76t\" (UniqueName: \"kubernetes.io/projected/7272cedc-9fcb-4482-be6b-df09c09f5541-kube-api-access-ng76t\") pod \"community-operators-bqbcv\" (UID: \"7272cedc-9fcb-4482-be6b-df09c09f5541\") " pod="openshift-marketplace/community-operators-bqbcv" Dec 13 07:06:29 crc kubenswrapper[5048]: I1213 07:06:29.054718 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7272cedc-9fcb-4482-be6b-df09c09f5541-utilities\") pod \"community-operators-bqbcv\" (UID: \"7272cedc-9fcb-4482-be6b-df09c09f5541\") " pod="openshift-marketplace/community-operators-bqbcv" Dec 13 07:06:29 crc kubenswrapper[5048]: I1213 07:06:29.157064 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ng76t\" (UniqueName: \"kubernetes.io/projected/7272cedc-9fcb-4482-be6b-df09c09f5541-kube-api-access-ng76t\") pod \"community-operators-bqbcv\" (UID: \"7272cedc-9fcb-4482-be6b-df09c09f5541\") " pod="openshift-marketplace/community-operators-bqbcv" Dec 13 07:06:29 crc kubenswrapper[5048]: I1213 07:06:29.157144 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7272cedc-9fcb-4482-be6b-df09c09f5541-utilities\") pod \"community-operators-bqbcv\" (UID: \"7272cedc-9fcb-4482-be6b-df09c09f5541\") " pod="openshift-marketplace/community-operators-bqbcv" Dec 13 07:06:29 crc kubenswrapper[5048]: I1213 07:06:29.157248 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7272cedc-9fcb-4482-be6b-df09c09f5541-catalog-content\") pod \"community-operators-bqbcv\" (UID: \"7272cedc-9fcb-4482-be6b-df09c09f5541\") " pod="openshift-marketplace/community-operators-bqbcv" Dec 13 07:06:29 crc kubenswrapper[5048]: I1213 07:06:29.157700 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7272cedc-9fcb-4482-be6b-df09c09f5541-utilities\") pod \"community-operators-bqbcv\" (UID: \"7272cedc-9fcb-4482-be6b-df09c09f5541\") " pod="openshift-marketplace/community-operators-bqbcv" Dec 13 07:06:29 crc kubenswrapper[5048]: I1213 07:06:29.157777 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7272cedc-9fcb-4482-be6b-df09c09f5541-catalog-content\") pod \"community-operators-bqbcv\" (UID: \"7272cedc-9fcb-4482-be6b-df09c09f5541\") " pod="openshift-marketplace/community-operators-bqbcv" Dec 13 07:06:29 crc kubenswrapper[5048]: I1213 07:06:29.179160 5048 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-ng76t\" (UniqueName: \"kubernetes.io/projected/7272cedc-9fcb-4482-be6b-df09c09f5541-kube-api-access-ng76t\") pod \"community-operators-bqbcv\" (UID: \"7272cedc-9fcb-4482-be6b-df09c09f5541\") " pod="openshift-marketplace/community-operators-bqbcv" Dec 13 07:06:29 crc kubenswrapper[5048]: I1213 07:06:29.357378 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bqbcv" Dec 13 07:06:29 crc kubenswrapper[5048]: I1213 07:06:29.904580 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bqbcv"] Dec 13 07:06:30 crc kubenswrapper[5048]: I1213 07:06:30.719643 5048 generic.go:334] "Generic (PLEG): container finished" podID="7272cedc-9fcb-4482-be6b-df09c09f5541" containerID="b81985f728d920290cf6e465070f88514c4e716d9c4d7f50bd583c13cb14c5cc" exitCode=0 Dec 13 07:06:30 crc kubenswrapper[5048]: I1213 07:06:30.719913 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bqbcv" event={"ID":"7272cedc-9fcb-4482-be6b-df09c09f5541","Type":"ContainerDied","Data":"b81985f728d920290cf6e465070f88514c4e716d9c4d7f50bd583c13cb14c5cc"} Dec 13 07:06:30 crc kubenswrapper[5048]: I1213 07:06:30.719982 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bqbcv" event={"ID":"7272cedc-9fcb-4482-be6b-df09c09f5541","Type":"ContainerStarted","Data":"68eef49359992a429cddaa39fb67163b67c724521a54d71b493186b9f94cd5e2"} Dec 13 07:06:31 crc kubenswrapper[5048]: I1213 07:06:31.729177 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bqbcv" event={"ID":"7272cedc-9fcb-4482-be6b-df09c09f5541","Type":"ContainerStarted","Data":"696ef63170affeaed8e0a8c29f31d409ff4630a205243e8e05033cbcd336cf28"} Dec 13 07:06:32 crc kubenswrapper[5048]: I1213 07:06:32.741762 5048 generic.go:334] "Generic (PLEG): container finished" podID="7272cedc-9fcb-4482-be6b-df09c09f5541" containerID="696ef63170affeaed8e0a8c29f31d409ff4630a205243e8e05033cbcd336cf28" exitCode=0 Dec 13 07:06:32 crc kubenswrapper[5048]: I1213 07:06:32.741806 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bqbcv" event={"ID":"7272cedc-9fcb-4482-be6b-df09c09f5541","Type":"ContainerDied","Data":"696ef63170affeaed8e0a8c29f31d409ff4630a205243e8e05033cbcd336cf28"} Dec 13 07:06:34 crc kubenswrapper[5048]: I1213 07:06:34.776717 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bqbcv" event={"ID":"7272cedc-9fcb-4482-be6b-df09c09f5541","Type":"ContainerStarted","Data":"a5cd756879c2fc5e70d4e9a01c1248c3ed5ad2ca653a49a6c99445586206e701"} Dec 13 07:06:34 crc kubenswrapper[5048]: I1213 07:06:34.815907 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-bqbcv" podStartSLOduration=2.8003541629999997 podStartE2EDuration="5.81589073s" podCreationTimestamp="2025-12-13 07:06:29 +0000 UTC" firstStartedPulling="2025-12-13 07:06:30.72157779 +0000 UTC m=+2224.588172371" lastFinishedPulling="2025-12-13 07:06:33.737114357 +0000 UTC m=+2227.603708938" observedRunningTime="2025-12-13 07:06:34.811280446 +0000 UTC m=+2228.677875037" watchObservedRunningTime="2025-12-13 07:06:34.81589073 +0000 UTC m=+2228.682485301" Dec 13 07:06:39 crc kubenswrapper[5048]: I1213 07:06:39.399134 5048 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-bqbcv" Dec 13 07:06:39 crc kubenswrapper[5048]: I1213 07:06:39.400281 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-bqbcv" Dec 13 07:06:39 crc kubenswrapper[5048]: I1213 07:06:39.450167 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-bqbcv" Dec 13 07:06:39 crc kubenswrapper[5048]: I1213 07:06:39.860795 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-bqbcv" Dec 13 07:06:39 crc kubenswrapper[5048]: I1213 07:06:39.909305 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bqbcv"] Dec 13 07:06:41 crc kubenswrapper[5048]: I1213 07:06:41.840206 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-bqbcv" podUID="7272cedc-9fcb-4482-be6b-df09c09f5541" containerName="registry-server" containerID="cri-o://a5cd756879c2fc5e70d4e9a01c1248c3ed5ad2ca653a49a6c99445586206e701" gracePeriod=2 Dec 13 07:06:42 crc kubenswrapper[5048]: I1213 07:06:42.294944 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bqbcv" Dec 13 07:06:42 crc kubenswrapper[5048]: I1213 07:06:42.496055 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7272cedc-9fcb-4482-be6b-df09c09f5541-utilities\") pod \"7272cedc-9fcb-4482-be6b-df09c09f5541\" (UID: \"7272cedc-9fcb-4482-be6b-df09c09f5541\") " Dec 13 07:06:42 crc kubenswrapper[5048]: I1213 07:06:42.496114 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7272cedc-9fcb-4482-be6b-df09c09f5541-catalog-content\") pod \"7272cedc-9fcb-4482-be6b-df09c09f5541\" (UID: \"7272cedc-9fcb-4482-be6b-df09c09f5541\") " Dec 13 07:06:42 crc kubenswrapper[5048]: I1213 07:06:42.496326 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ng76t\" (UniqueName: \"kubernetes.io/projected/7272cedc-9fcb-4482-be6b-df09c09f5541-kube-api-access-ng76t\") pod \"7272cedc-9fcb-4482-be6b-df09c09f5541\" (UID: \"7272cedc-9fcb-4482-be6b-df09c09f5541\") " Dec 13 07:06:42 crc kubenswrapper[5048]: I1213 07:06:42.497578 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7272cedc-9fcb-4482-be6b-df09c09f5541-utilities" (OuterVolumeSpecName: "utilities") pod "7272cedc-9fcb-4482-be6b-df09c09f5541" (UID: "7272cedc-9fcb-4482-be6b-df09c09f5541"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 07:06:42 crc kubenswrapper[5048]: I1213 07:06:42.502557 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7272cedc-9fcb-4482-be6b-df09c09f5541-kube-api-access-ng76t" (OuterVolumeSpecName: "kube-api-access-ng76t") pod "7272cedc-9fcb-4482-be6b-df09c09f5541" (UID: "7272cedc-9fcb-4482-be6b-df09c09f5541"). InnerVolumeSpecName "kube-api-access-ng76t". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 07:06:42 crc kubenswrapper[5048]: I1213 07:06:42.556968 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7272cedc-9fcb-4482-be6b-df09c09f5541-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7272cedc-9fcb-4482-be6b-df09c09f5541" (UID: "7272cedc-9fcb-4482-be6b-df09c09f5541"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 07:06:42 crc kubenswrapper[5048]: I1213 07:06:42.599313 5048 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7272cedc-9fcb-4482-be6b-df09c09f5541-utilities\") on node \"crc\" DevicePath \"\"" Dec 13 07:06:42 crc kubenswrapper[5048]: I1213 07:06:42.599361 5048 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7272cedc-9fcb-4482-be6b-df09c09f5541-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 13 07:06:42 crc kubenswrapper[5048]: I1213 07:06:42.599376 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ng76t\" (UniqueName: \"kubernetes.io/projected/7272cedc-9fcb-4482-be6b-df09c09f5541-kube-api-access-ng76t\") on node \"crc\" DevicePath \"\"" Dec 13 07:06:42 crc kubenswrapper[5048]: I1213 07:06:42.857335 5048 generic.go:334] "Generic (PLEG): container finished" podID="7272cedc-9fcb-4482-be6b-df09c09f5541" containerID="a5cd756879c2fc5e70d4e9a01c1248c3ed5ad2ca653a49a6c99445586206e701" exitCode=0 Dec 13 07:06:42 crc kubenswrapper[5048]: I1213 07:06:42.857384 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bqbcv" event={"ID":"7272cedc-9fcb-4482-be6b-df09c09f5541","Type":"ContainerDied","Data":"a5cd756879c2fc5e70d4e9a01c1248c3ed5ad2ca653a49a6c99445586206e701"} Dec 13 07:06:42 crc kubenswrapper[5048]: I1213 07:06:42.857417 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bqbcv" event={"ID":"7272cedc-9fcb-4482-be6b-df09c09f5541","Type":"ContainerDied","Data":"68eef49359992a429cddaa39fb67163b67c724521a54d71b493186b9f94cd5e2"} Dec 13 07:06:42 crc kubenswrapper[5048]: I1213 07:06:42.857419 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bqbcv" Dec 13 07:06:42 crc kubenswrapper[5048]: I1213 07:06:42.857453 5048 scope.go:117] "RemoveContainer" containerID="a5cd756879c2fc5e70d4e9a01c1248c3ed5ad2ca653a49a6c99445586206e701" Dec 13 07:06:42 crc kubenswrapper[5048]: I1213 07:06:42.884955 5048 scope.go:117] "RemoveContainer" containerID="696ef63170affeaed8e0a8c29f31d409ff4630a205243e8e05033cbcd336cf28" Dec 13 07:06:42 crc kubenswrapper[5048]: I1213 07:06:42.887928 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bqbcv"] Dec 13 07:06:42 crc kubenswrapper[5048]: I1213 07:06:42.897882 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-bqbcv"] Dec 13 07:06:42 crc kubenswrapper[5048]: I1213 07:06:42.911605 5048 scope.go:117] "RemoveContainer" containerID="b81985f728d920290cf6e465070f88514c4e716d9c4d7f50bd583c13cb14c5cc" Dec 13 07:06:42 crc kubenswrapper[5048]: I1213 07:06:42.960488 5048 scope.go:117] "RemoveContainer" containerID="a5cd756879c2fc5e70d4e9a01c1248c3ed5ad2ca653a49a6c99445586206e701" Dec 13 07:06:42 crc kubenswrapper[5048]: E1213 07:06:42.961025 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a5cd756879c2fc5e70d4e9a01c1248c3ed5ad2ca653a49a6c99445586206e701\": container with ID starting with a5cd756879c2fc5e70d4e9a01c1248c3ed5ad2ca653a49a6c99445586206e701 not found: ID does not exist" containerID="a5cd756879c2fc5e70d4e9a01c1248c3ed5ad2ca653a49a6c99445586206e701" Dec 13 07:06:42 crc kubenswrapper[5048]: I1213 07:06:42.961091 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a5cd756879c2fc5e70d4e9a01c1248c3ed5ad2ca653a49a6c99445586206e701"} err="failed to get container status \"a5cd756879c2fc5e70d4e9a01c1248c3ed5ad2ca653a49a6c99445586206e701\": rpc error: code = NotFound desc = could not find container \"a5cd756879c2fc5e70d4e9a01c1248c3ed5ad2ca653a49a6c99445586206e701\": container with ID starting with a5cd756879c2fc5e70d4e9a01c1248c3ed5ad2ca653a49a6c99445586206e701 not found: ID does not exist" Dec 13 07:06:42 crc kubenswrapper[5048]: I1213 07:06:42.961177 5048 scope.go:117] "RemoveContainer" containerID="696ef63170affeaed8e0a8c29f31d409ff4630a205243e8e05033cbcd336cf28" Dec 13 07:06:42 crc kubenswrapper[5048]: E1213 07:06:42.961976 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"696ef63170affeaed8e0a8c29f31d409ff4630a205243e8e05033cbcd336cf28\": container with ID starting with 696ef63170affeaed8e0a8c29f31d409ff4630a205243e8e05033cbcd336cf28 not found: ID does not exist" containerID="696ef63170affeaed8e0a8c29f31d409ff4630a205243e8e05033cbcd336cf28" Dec 13 07:06:42 crc kubenswrapper[5048]: I1213 07:06:42.962016 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"696ef63170affeaed8e0a8c29f31d409ff4630a205243e8e05033cbcd336cf28"} err="failed to get container status \"696ef63170affeaed8e0a8c29f31d409ff4630a205243e8e05033cbcd336cf28\": rpc error: code = NotFound desc = could not find container \"696ef63170affeaed8e0a8c29f31d409ff4630a205243e8e05033cbcd336cf28\": container with ID starting with 696ef63170affeaed8e0a8c29f31d409ff4630a205243e8e05033cbcd336cf28 not found: ID does not exist" Dec 13 07:06:42 crc kubenswrapper[5048]: I1213 07:06:42.962042 5048 scope.go:117] "RemoveContainer" 
containerID="b81985f728d920290cf6e465070f88514c4e716d9c4d7f50bd583c13cb14c5cc" Dec 13 07:06:42 crc kubenswrapper[5048]: E1213 07:06:42.962345 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b81985f728d920290cf6e465070f88514c4e716d9c4d7f50bd583c13cb14c5cc\": container with ID starting with b81985f728d920290cf6e465070f88514c4e716d9c4d7f50bd583c13cb14c5cc not found: ID does not exist" containerID="b81985f728d920290cf6e465070f88514c4e716d9c4d7f50bd583c13cb14c5cc" Dec 13 07:06:42 crc kubenswrapper[5048]: I1213 07:06:42.962380 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b81985f728d920290cf6e465070f88514c4e716d9c4d7f50bd583c13cb14c5cc"} err="failed to get container status \"b81985f728d920290cf6e465070f88514c4e716d9c4d7f50bd583c13cb14c5cc\": rpc error: code = NotFound desc = could not find container \"b81985f728d920290cf6e465070f88514c4e716d9c4d7f50bd583c13cb14c5cc\": container with ID starting with b81985f728d920290cf6e465070f88514c4e716d9c4d7f50bd583c13cb14c5cc not found: ID does not exist" Dec 13 07:06:44 crc kubenswrapper[5048]: I1213 07:06:44.577847 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7272cedc-9fcb-4482-be6b-df09c09f5541" path="/var/lib/kubelet/pods/7272cedc-9fcb-4482-be6b-df09c09f5541/volumes" Dec 13 07:07:01 crc kubenswrapper[5048]: I1213 07:07:01.045179 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-6vfmc"] Dec 13 07:07:01 crc kubenswrapper[5048]: E1213 07:07:01.046299 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7272cedc-9fcb-4482-be6b-df09c09f5541" containerName="extract-content" Dec 13 07:07:01 crc kubenswrapper[5048]: I1213 07:07:01.046316 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="7272cedc-9fcb-4482-be6b-df09c09f5541" containerName="extract-content" Dec 13 07:07:01 crc kubenswrapper[5048]: E1213 07:07:01.046337 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7272cedc-9fcb-4482-be6b-df09c09f5541" containerName="registry-server" Dec 13 07:07:01 crc kubenswrapper[5048]: I1213 07:07:01.046345 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="7272cedc-9fcb-4482-be6b-df09c09f5541" containerName="registry-server" Dec 13 07:07:01 crc kubenswrapper[5048]: E1213 07:07:01.046363 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7272cedc-9fcb-4482-be6b-df09c09f5541" containerName="extract-utilities" Dec 13 07:07:01 crc kubenswrapper[5048]: I1213 07:07:01.046372 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="7272cedc-9fcb-4482-be6b-df09c09f5541" containerName="extract-utilities" Dec 13 07:07:01 crc kubenswrapper[5048]: I1213 07:07:01.046619 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="7272cedc-9fcb-4482-be6b-df09c09f5541" containerName="registry-server" Dec 13 07:07:01 crc kubenswrapper[5048]: I1213 07:07:01.048236 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6vfmc" Dec 13 07:07:01 crc kubenswrapper[5048]: I1213 07:07:01.076841 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6vfmc"] Dec 13 07:07:01 crc kubenswrapper[5048]: I1213 07:07:01.143725 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7fl5h\" (UniqueName: \"kubernetes.io/projected/3532860c-739d-417c-a173-f594bd4c073e-kube-api-access-7fl5h\") pod \"redhat-marketplace-6vfmc\" (UID: \"3532860c-739d-417c-a173-f594bd4c073e\") " pod="openshift-marketplace/redhat-marketplace-6vfmc" Dec 13 07:07:01 crc kubenswrapper[5048]: I1213 07:07:01.143952 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3532860c-739d-417c-a173-f594bd4c073e-catalog-content\") pod \"redhat-marketplace-6vfmc\" (UID: \"3532860c-739d-417c-a173-f594bd4c073e\") " pod="openshift-marketplace/redhat-marketplace-6vfmc" Dec 13 07:07:01 crc kubenswrapper[5048]: I1213 07:07:01.144006 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3532860c-739d-417c-a173-f594bd4c073e-utilities\") pod \"redhat-marketplace-6vfmc\" (UID: \"3532860c-739d-417c-a173-f594bd4c073e\") " pod="openshift-marketplace/redhat-marketplace-6vfmc" Dec 13 07:07:01 crc kubenswrapper[5048]: I1213 07:07:01.245664 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3532860c-739d-417c-a173-f594bd4c073e-catalog-content\") pod \"redhat-marketplace-6vfmc\" (UID: \"3532860c-739d-417c-a173-f594bd4c073e\") " pod="openshift-marketplace/redhat-marketplace-6vfmc" Dec 13 07:07:01 crc kubenswrapper[5048]: I1213 07:07:01.245705 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3532860c-739d-417c-a173-f594bd4c073e-utilities\") pod \"redhat-marketplace-6vfmc\" (UID: \"3532860c-739d-417c-a173-f594bd4c073e\") " pod="openshift-marketplace/redhat-marketplace-6vfmc" Dec 13 07:07:01 crc kubenswrapper[5048]: I1213 07:07:01.245825 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7fl5h\" (UniqueName: \"kubernetes.io/projected/3532860c-739d-417c-a173-f594bd4c073e-kube-api-access-7fl5h\") pod \"redhat-marketplace-6vfmc\" (UID: \"3532860c-739d-417c-a173-f594bd4c073e\") " pod="openshift-marketplace/redhat-marketplace-6vfmc" Dec 13 07:07:01 crc kubenswrapper[5048]: I1213 07:07:01.246201 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3532860c-739d-417c-a173-f594bd4c073e-catalog-content\") pod \"redhat-marketplace-6vfmc\" (UID: \"3532860c-739d-417c-a173-f594bd4c073e\") " pod="openshift-marketplace/redhat-marketplace-6vfmc" Dec 13 07:07:01 crc kubenswrapper[5048]: I1213 07:07:01.246334 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3532860c-739d-417c-a173-f594bd4c073e-utilities\") pod \"redhat-marketplace-6vfmc\" (UID: \"3532860c-739d-417c-a173-f594bd4c073e\") " pod="openshift-marketplace/redhat-marketplace-6vfmc" Dec 13 07:07:01 crc kubenswrapper[5048]: I1213 07:07:01.267996 5048 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-7fl5h\" (UniqueName: \"kubernetes.io/projected/3532860c-739d-417c-a173-f594bd4c073e-kube-api-access-7fl5h\") pod \"redhat-marketplace-6vfmc\" (UID: \"3532860c-739d-417c-a173-f594bd4c073e\") " pod="openshift-marketplace/redhat-marketplace-6vfmc"
Dec 13 07:07:01 crc kubenswrapper[5048]: I1213 07:07:01.378744 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6vfmc"
Dec 13 07:07:01 crc kubenswrapper[5048]: I1213 07:07:01.836808 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6vfmc"]
Dec 13 07:07:02 crc kubenswrapper[5048]: I1213 07:07:02.038741 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6vfmc" event={"ID":"3532860c-739d-417c-a173-f594bd4c073e","Type":"ContainerStarted","Data":"5b3a5f71fe5a319e376550ca047bbd4fb646950eaaedc55f1c7e6d30273e553f"}
Dec 13 07:07:03 crc kubenswrapper[5048]: I1213 07:07:03.088762 5048 generic.go:334] "Generic (PLEG): container finished" podID="3532860c-739d-417c-a173-f594bd4c073e" containerID="a8e78dce39c95981adae2c4e770d70835855eecbaf416ce7b52165884e828bc2" exitCode=0
Dec 13 07:07:03 crc kubenswrapper[5048]: I1213 07:07:03.088795 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6vfmc" event={"ID":"3532860c-739d-417c-a173-f594bd4c073e","Type":"ContainerDied","Data":"a8e78dce39c95981adae2c4e770d70835855eecbaf416ce7b52165884e828bc2"}
Dec 13 07:07:05 crc kubenswrapper[5048]: I1213 07:07:05.109552 5048 generic.go:334] "Generic (PLEG): container finished" podID="3532860c-739d-417c-a173-f594bd4c073e" containerID="9b16e63f17061444100f4f989c7d36c108f7826c8bfa23c64d5e2bd23caab1c6" exitCode=0
Dec 13 07:07:05 crc kubenswrapper[5048]: I1213 07:07:05.109735 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6vfmc" event={"ID":"3532860c-739d-417c-a173-f594bd4c073e","Type":"ContainerDied","Data":"9b16e63f17061444100f4f989c7d36c108f7826c8bfa23c64d5e2bd23caab1c6"}
Dec 13 07:07:07 crc kubenswrapper[5048]: I1213 07:07:07.127681 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6vfmc" event={"ID":"3532860c-739d-417c-a173-f594bd4c073e","Type":"ContainerStarted","Data":"0881f2d6a52922a1ef0a35c50c14741ed72b7295ad6b57cdac2932242f5962bb"}
Dec 13 07:07:07 crc kubenswrapper[5048]: I1213 07:07:07.152370 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-6vfmc" podStartSLOduration=3.4134587769999998 podStartE2EDuration="6.152353792s" podCreationTimestamp="2025-12-13 07:07:01 +0000 UTC" firstStartedPulling="2025-12-13 07:07:03.090658962 +0000 UTC m=+2256.957253543" lastFinishedPulling="2025-12-13 07:07:05.829553977 +0000 UTC m=+2259.696148558" observedRunningTime="2025-12-13 07:07:07.149766932 +0000 UTC m=+2261.016361513" watchObservedRunningTime="2025-12-13 07:07:07.152353792 +0000 UTC m=+2261.018948373"
Dec 13 07:07:11 crc kubenswrapper[5048]: I1213 07:07:11.379078 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-6vfmc"
Dec 13 07:07:11 crc kubenswrapper[5048]: I1213 07:07:11.379598 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-6vfmc"
Dec 13 07:07:11 crc kubenswrapper[5048]: I1213 07:07:11.444563 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-6vfmc"
Dec 13 07:07:12 crc kubenswrapper[5048]: I1213 07:07:12.231217 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-6vfmc"
Dec 13 07:07:12 crc kubenswrapper[5048]: I1213 07:07:12.632408 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6vfmc"]
Dec 13 07:07:14 crc kubenswrapper[5048]: I1213 07:07:14.189921 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-6vfmc" podUID="3532860c-739d-417c-a173-f594bd4c073e" containerName="registry-server" containerID="cri-o://0881f2d6a52922a1ef0a35c50c14741ed72b7295ad6b57cdac2932242f5962bb" gracePeriod=2
Dec 13 07:07:14 crc kubenswrapper[5048]: I1213 07:07:14.965272 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6vfmc"
Dec 13 07:07:14 crc kubenswrapper[5048]: I1213 07:07:14.988564 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3532860c-739d-417c-a173-f594bd4c073e-catalog-content\") pod \"3532860c-739d-417c-a173-f594bd4c073e\" (UID: \"3532860c-739d-417c-a173-f594bd4c073e\") "
Dec 13 07:07:14 crc kubenswrapper[5048]: I1213 07:07:14.988692 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3532860c-739d-417c-a173-f594bd4c073e-utilities\") pod \"3532860c-739d-417c-a173-f594bd4c073e\" (UID: \"3532860c-739d-417c-a173-f594bd4c073e\") "
Dec 13 07:07:14 crc kubenswrapper[5048]: I1213 07:07:14.988775 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7fl5h\" (UniqueName: \"kubernetes.io/projected/3532860c-739d-417c-a173-f594bd4c073e-kube-api-access-7fl5h\") pod \"3532860c-739d-417c-a173-f594bd4c073e\" (UID: \"3532860c-739d-417c-a173-f594bd4c073e\") "
Dec 13 07:07:14 crc kubenswrapper[5048]: I1213 07:07:14.989417 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3532860c-739d-417c-a173-f594bd4c073e-utilities" (OuterVolumeSpecName: "utilities") pod "3532860c-739d-417c-a173-f594bd4c073e" (UID: "3532860c-739d-417c-a173-f594bd4c073e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 13 07:07:14 crc kubenswrapper[5048]: I1213 07:07:14.994244 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3532860c-739d-417c-a173-f594bd4c073e-kube-api-access-7fl5h" (OuterVolumeSpecName: "kube-api-access-7fl5h") pod "3532860c-739d-417c-a173-f594bd4c073e" (UID: "3532860c-739d-417c-a173-f594bd4c073e"). InnerVolumeSpecName "kube-api-access-7fl5h". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 07:07:15 crc kubenswrapper[5048]: I1213 07:07:15.170883 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3532860c-739d-417c-a173-f594bd4c073e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3532860c-739d-417c-a173-f594bd4c073e" (UID: "3532860c-739d-417c-a173-f594bd4c073e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 13 07:07:15 crc kubenswrapper[5048]: I1213 07:07:15.172935 5048 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3532860c-739d-417c-a173-f594bd4c073e-utilities\") on node \"crc\" DevicePath \"\""
Dec 13 07:07:15 crc kubenswrapper[5048]: I1213 07:07:15.175195 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7fl5h\" (UniqueName: \"kubernetes.io/projected/3532860c-739d-417c-a173-f594bd4c073e-kube-api-access-7fl5h\") on node \"crc\" DevicePath \"\""
Dec 13 07:07:15 crc kubenswrapper[5048]: I1213 07:07:15.199105 5048 generic.go:334] "Generic (PLEG): container finished" podID="3532860c-739d-417c-a173-f594bd4c073e" containerID="0881f2d6a52922a1ef0a35c50c14741ed72b7295ad6b57cdac2932242f5962bb" exitCode=0
Dec 13 07:07:15 crc kubenswrapper[5048]: I1213 07:07:15.199190 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6vfmc"
Dec 13 07:07:15 crc kubenswrapper[5048]: I1213 07:07:15.199192 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6vfmc" event={"ID":"3532860c-739d-417c-a173-f594bd4c073e","Type":"ContainerDied","Data":"0881f2d6a52922a1ef0a35c50c14741ed72b7295ad6b57cdac2932242f5962bb"}
Dec 13 07:07:15 crc kubenswrapper[5048]: I1213 07:07:15.199352 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6vfmc" event={"ID":"3532860c-739d-417c-a173-f594bd4c073e","Type":"ContainerDied","Data":"5b3a5f71fe5a319e376550ca047bbd4fb646950eaaedc55f1c7e6d30273e553f"}
Dec 13 07:07:15 crc kubenswrapper[5048]: I1213 07:07:15.199372 5048 scope.go:117] "RemoveContainer" containerID="0881f2d6a52922a1ef0a35c50c14741ed72b7295ad6b57cdac2932242f5962bb"
Dec 13 07:07:15 crc kubenswrapper[5048]: I1213 07:07:15.222124 5048 scope.go:117] "RemoveContainer" containerID="9b16e63f17061444100f4f989c7d36c108f7826c8bfa23c64d5e2bd23caab1c6"
Dec 13 07:07:15 crc kubenswrapper[5048]: I1213 07:07:15.241850 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6vfmc"]
Dec 13 07:07:15 crc kubenswrapper[5048]: I1213 07:07:15.246693 5048 scope.go:117] "RemoveContainer" containerID="a8e78dce39c95981adae2c4e770d70835855eecbaf416ce7b52165884e828bc2"
Dec 13 07:07:15 crc kubenswrapper[5048]: I1213 07:07:15.249308 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-6vfmc"]
Dec 13 07:07:15 crc kubenswrapper[5048]: I1213 07:07:15.278248 5048 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3532860c-739d-417c-a173-f594bd4c073e-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 13 07:07:15 crc kubenswrapper[5048]: I1213 07:07:15.295425 5048 scope.go:117] "RemoveContainer" containerID="0881f2d6a52922a1ef0a35c50c14741ed72b7295ad6b57cdac2932242f5962bb"
Dec 13 07:07:15 crc kubenswrapper[5048]: E1213 07:07:15.296013 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0881f2d6a52922a1ef0a35c50c14741ed72b7295ad6b57cdac2932242f5962bb\": container with ID starting with 0881f2d6a52922a1ef0a35c50c14741ed72b7295ad6b57cdac2932242f5962bb not found: ID does not exist" containerID="0881f2d6a52922a1ef0a35c50c14741ed72b7295ad6b57cdac2932242f5962bb"
Dec 13 07:07:15 crc kubenswrapper[5048]: I1213 07:07:15.296123 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0881f2d6a52922a1ef0a35c50c14741ed72b7295ad6b57cdac2932242f5962bb"} err="failed to get container status \"0881f2d6a52922a1ef0a35c50c14741ed72b7295ad6b57cdac2932242f5962bb\": rpc error: code = NotFound desc = could not find container \"0881f2d6a52922a1ef0a35c50c14741ed72b7295ad6b57cdac2932242f5962bb\": container with ID starting with 0881f2d6a52922a1ef0a35c50c14741ed72b7295ad6b57cdac2932242f5962bb not found: ID does not exist"
Dec 13 07:07:15 crc kubenswrapper[5048]: I1213 07:07:15.296217 5048 scope.go:117] "RemoveContainer" containerID="9b16e63f17061444100f4f989c7d36c108f7826c8bfa23c64d5e2bd23caab1c6"
Dec 13 07:07:15 crc kubenswrapper[5048]: E1213 07:07:15.296703 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9b16e63f17061444100f4f989c7d36c108f7826c8bfa23c64d5e2bd23caab1c6\": container with ID starting with 9b16e63f17061444100f4f989c7d36c108f7826c8bfa23c64d5e2bd23caab1c6 not found: ID does not exist" containerID="9b16e63f17061444100f4f989c7d36c108f7826c8bfa23c64d5e2bd23caab1c6"
Dec 13 07:07:15 crc kubenswrapper[5048]: I1213 07:07:15.296738 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b16e63f17061444100f4f989c7d36c108f7826c8bfa23c64d5e2bd23caab1c6"} err="failed to get container status \"9b16e63f17061444100f4f989c7d36c108f7826c8bfa23c64d5e2bd23caab1c6\": rpc error: code = NotFound desc = could not find container \"9b16e63f17061444100f4f989c7d36c108f7826c8bfa23c64d5e2bd23caab1c6\": container with ID starting with 9b16e63f17061444100f4f989c7d36c108f7826c8bfa23c64d5e2bd23caab1c6 not found: ID does not exist"
Dec 13 07:07:15 crc kubenswrapper[5048]: I1213 07:07:15.296760 5048 scope.go:117] "RemoveContainer" containerID="a8e78dce39c95981adae2c4e770d70835855eecbaf416ce7b52165884e828bc2"
Dec 13 07:07:15 crc kubenswrapper[5048]: E1213 07:07:15.297087 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a8e78dce39c95981adae2c4e770d70835855eecbaf416ce7b52165884e828bc2\": container with ID starting with a8e78dce39c95981adae2c4e770d70835855eecbaf416ce7b52165884e828bc2 not found: ID does not exist" containerID="a8e78dce39c95981adae2c4e770d70835855eecbaf416ce7b52165884e828bc2"
Dec 13 07:07:15 crc kubenswrapper[5048]: I1213 07:07:15.297183 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a8e78dce39c95981adae2c4e770d70835855eecbaf416ce7b52165884e828bc2"} err="failed to get container status \"a8e78dce39c95981adae2c4e770d70835855eecbaf416ce7b52165884e828bc2\": rpc error: code = NotFound desc = could not find container \"a8e78dce39c95981adae2c4e770d70835855eecbaf416ce7b52165884e828bc2\": container with ID starting with a8e78dce39c95981adae2c4e770d70835855eecbaf416ce7b52165884e828bc2 not found: ID does not exist"
Dec 13 07:07:16 crc kubenswrapper[5048]: I1213 07:07:16.256895 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 13 07:07:16 crc kubenswrapper[5048]: I1213 07:07:16.257213 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 13 07:07:16 crc kubenswrapper[5048]: I1213 07:07:16.580720 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3532860c-739d-417c-a173-f594bd4c073e" path="/var/lib/kubelet/pods/3532860c-739d-417c-a173-f594bd4c073e/volumes"
Dec 13 07:07:46 crc kubenswrapper[5048]: I1213 07:07:46.216064 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 13 07:07:46 crc kubenswrapper[5048]: I1213 07:07:46.216724 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 13 07:08:16 crc kubenswrapper[5048]: I1213 07:08:16.216255 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 13 07:08:16 crc kubenswrapper[5048]: I1213 07:08:16.217144 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 13 07:08:16 crc kubenswrapper[5048]: I1213 07:08:16.217248 5048 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-j7hns"
Dec 13 07:08:16 crc kubenswrapper[5048]: I1213 07:08:16.218669 5048 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"13a24424a3c51e9ddc7d4af672496f5caa198f8ddf1e75b5f564a337c4d739d6"} pod="openshift-machine-config-operator/machine-config-daemon-j7hns" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Dec 13 07:08:16 crc kubenswrapper[5048]: I1213 07:08:16.218786 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" containerID="cri-o://13a24424a3c51e9ddc7d4af672496f5caa198f8ddf1e75b5f564a337c4d739d6" gracePeriod=600
Dec 13 07:08:16 crc kubenswrapper[5048]: I1213 07:08:16.833398 5048 generic.go:334] "Generic (PLEG): container finished" podID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerID="13a24424a3c51e9ddc7d4af672496f5caa198f8ddf1e75b5f564a337c4d739d6" exitCode=0
Dec 13 07:08:16 crc kubenswrapper[5048]: I1213 07:08:16.833477 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" event={"ID":"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b","Type":"ContainerDied","Data":"13a24424a3c51e9ddc7d4af672496f5caa198f8ddf1e75b5f564a337c4d739d6"}
Dec 13 07:08:16 crc kubenswrapper[5048]: I1213 07:08:16.833790 5048 scope.go:117] "RemoveContainer" containerID="53e69e2487d25cf71ad6303383d3f610cb5fd038d34d4d4b9e63796a8c974cf5"
Dec 13 07:08:16 crc kubenswrapper[5048]: E1213 07:08:16.851270 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b"
Dec 13 07:08:17 crc kubenswrapper[5048]: I1213 07:08:17.849657 5048 scope.go:117] "RemoveContainer" containerID="13a24424a3c51e9ddc7d4af672496f5caa198f8ddf1e75b5f564a337c4d739d6"
Dec 13 07:08:17 crc kubenswrapper[5048]: E1213 07:08:17.850114 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b"
Dec 13 07:08:31 crc kubenswrapper[5048]: I1213 07:08:31.567732 5048 scope.go:117] "RemoveContainer" containerID="13a24424a3c51e9ddc7d4af672496f5caa198f8ddf1e75b5f564a337c4d739d6"
Dec 13 07:08:31 crc kubenswrapper[5048]: E1213 07:08:31.569238 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b"
Dec 13 07:08:42 crc kubenswrapper[5048]: I1213 07:08:42.567226 5048 scope.go:117] "RemoveContainer" containerID="13a24424a3c51e9ddc7d4af672496f5caa198f8ddf1e75b5f564a337c4d739d6"
Dec 13 07:08:42 crc kubenswrapper[5048]: E1213 07:08:42.568125 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b"
Dec 13 07:08:54 crc kubenswrapper[5048]: I1213 07:08:54.566875 5048 scope.go:117] "RemoveContainer" containerID="13a24424a3c51e9ddc7d4af672496f5caa198f8ddf1e75b5f564a337c4d739d6"
Dec 13 07:08:54 crc kubenswrapper[5048]: E1213 07:08:54.567759 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b"
Dec 13 07:09:05 crc kubenswrapper[5048]: I1213 07:09:05.566220 5048 scope.go:117] "RemoveContainer" containerID="13a24424a3c51e9ddc7d4af672496f5caa198f8ddf1e75b5f564a337c4d739d6"
Dec 13 07:09:05 crc kubenswrapper[5048]: E1213 07:09:05.567057 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b"
Dec 13 07:09:16 crc kubenswrapper[5048]: I1213 07:09:16.573041 5048 scope.go:117] "RemoveContainer" containerID="13a24424a3c51e9ddc7d4af672496f5caa198f8ddf1e75b5f564a337c4d739d6"
Dec 13 07:09:16 crc kubenswrapper[5048]: E1213 07:09:16.573862 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b"
Dec 13 07:09:28 crc kubenswrapper[5048]: I1213 07:09:28.567191 5048 scope.go:117] "RemoveContainer" containerID="13a24424a3c51e9ddc7d4af672496f5caa198f8ddf1e75b5f564a337c4d739d6"
Dec 13 07:09:28 crc kubenswrapper[5048]: E1213 07:09:28.567958 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b"
Dec 13 07:09:40 crc kubenswrapper[5048]: I1213 07:09:40.566480 5048 scope.go:117] "RemoveContainer" containerID="13a24424a3c51e9ddc7d4af672496f5caa198f8ddf1e75b5f564a337c4d739d6"
Dec 13 07:09:40 crc kubenswrapper[5048]: E1213 07:09:40.568037 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b"
Dec 13 07:09:45 crc kubenswrapper[5048]: I1213 07:09:45.627299 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-r6hq6"]
Dec 13 07:09:45 crc kubenswrapper[5048]: E1213 07:09:45.628187 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3532860c-739d-417c-a173-f594bd4c073e" containerName="extract-content"
Dec 13 07:09:45 crc kubenswrapper[5048]: I1213 07:09:45.628199 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="3532860c-739d-417c-a173-f594bd4c073e" containerName="extract-content"
Dec 13 07:09:45 crc kubenswrapper[5048]: E1213 07:09:45.628218 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3532860c-739d-417c-a173-f594bd4c073e" containerName="registry-server"
Dec 13 07:09:45 crc kubenswrapper[5048]: I1213 07:09:45.628224 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="3532860c-739d-417c-a173-f594bd4c073e" containerName="registry-server"
Dec 13 07:09:45 crc kubenswrapper[5048]: E1213 07:09:45.628239 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3532860c-739d-417c-a173-f594bd4c073e" containerName="extract-utilities"
Dec 13 07:09:45 crc kubenswrapper[5048]: I1213 07:09:45.628245 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="3532860c-739d-417c-a173-f594bd4c073e" containerName="extract-utilities"
Dec 13 07:09:45 crc kubenswrapper[5048]: I1213 07:09:45.628512 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="3532860c-739d-417c-a173-f594bd4c073e" containerName="registry-server"
Dec 13 07:09:45 crc kubenswrapper[5048]: I1213 07:09:45.629882 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r6hq6"
Dec 13 07:09:45 crc kubenswrapper[5048]: I1213 07:09:45.646660 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-r6hq6"]
Dec 13 07:09:45 crc kubenswrapper[5048]: I1213 07:09:45.808586 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06c5faec-25e9-4b3d-a897-c9eaf1943687-catalog-content\") pod \"redhat-operators-r6hq6\" (UID: \"06c5faec-25e9-4b3d-a897-c9eaf1943687\") " pod="openshift-marketplace/redhat-operators-r6hq6"
Dec 13 07:09:45 crc kubenswrapper[5048]: I1213 07:09:45.808719 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dpq8j\" (UniqueName: \"kubernetes.io/projected/06c5faec-25e9-4b3d-a897-c9eaf1943687-kube-api-access-dpq8j\") pod \"redhat-operators-r6hq6\" (UID: \"06c5faec-25e9-4b3d-a897-c9eaf1943687\") " pod="openshift-marketplace/redhat-operators-r6hq6"
Dec 13 07:09:45 crc kubenswrapper[5048]: I1213 07:09:45.808944 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06c5faec-25e9-4b3d-a897-c9eaf1943687-utilities\") pod \"redhat-operators-r6hq6\" (UID: \"06c5faec-25e9-4b3d-a897-c9eaf1943687\") " pod="openshift-marketplace/redhat-operators-r6hq6"
Dec 13 07:09:45 crc kubenswrapper[5048]: I1213 07:09:45.910619 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06c5faec-25e9-4b3d-a897-c9eaf1943687-utilities\") pod \"redhat-operators-r6hq6\" (UID: \"06c5faec-25e9-4b3d-a897-c9eaf1943687\") " pod="openshift-marketplace/redhat-operators-r6hq6"
Dec 13 07:09:45 crc kubenswrapper[5048]: I1213 07:09:45.910714 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06c5faec-25e9-4b3d-a897-c9eaf1943687-catalog-content\") pod \"redhat-operators-r6hq6\" (UID: \"06c5faec-25e9-4b3d-a897-c9eaf1943687\") " pod="openshift-marketplace/redhat-operators-r6hq6"
Dec 13 07:09:45 crc kubenswrapper[5048]: I1213 07:09:45.910751 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dpq8j\" (UniqueName: \"kubernetes.io/projected/06c5faec-25e9-4b3d-a897-c9eaf1943687-kube-api-access-dpq8j\") pod \"redhat-operators-r6hq6\" (UID: \"06c5faec-25e9-4b3d-a897-c9eaf1943687\") " pod="openshift-marketplace/redhat-operators-r6hq6"
Dec 13 07:09:45 crc kubenswrapper[5048]: I1213 07:09:45.911217 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06c5faec-25e9-4b3d-a897-c9eaf1943687-utilities\") pod \"redhat-operators-r6hq6\" (UID: \"06c5faec-25e9-4b3d-a897-c9eaf1943687\") " pod="openshift-marketplace/redhat-operators-r6hq6"
Dec 13 07:09:45 crc kubenswrapper[5048]: I1213 07:09:45.911233 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06c5faec-25e9-4b3d-a897-c9eaf1943687-catalog-content\") pod \"redhat-operators-r6hq6\" (UID: \"06c5faec-25e9-4b3d-a897-c9eaf1943687\") " pod="openshift-marketplace/redhat-operators-r6hq6"
Dec 13 07:09:45 crc kubenswrapper[5048]: I1213 07:09:45.939784 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dpq8j\" (UniqueName: \"kubernetes.io/projected/06c5faec-25e9-4b3d-a897-c9eaf1943687-kube-api-access-dpq8j\") pod \"redhat-operators-r6hq6\" (UID: \"06c5faec-25e9-4b3d-a897-c9eaf1943687\") " pod="openshift-marketplace/redhat-operators-r6hq6"
Dec 13 07:09:45 crc kubenswrapper[5048]: I1213 07:09:45.961681 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r6hq6"
Dec 13 07:09:46 crc kubenswrapper[5048]: I1213 07:09:46.443299 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-r6hq6"]
Dec 13 07:09:46 crc kubenswrapper[5048]: I1213 07:09:46.638244 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r6hq6" event={"ID":"06c5faec-25e9-4b3d-a897-c9eaf1943687","Type":"ContainerStarted","Data":"d5716172c671603ddcfae10a22288545f38ac9264c832a46ab39290ab5016dd4"}
Dec 13 07:09:47 crc kubenswrapper[5048]: I1213 07:09:47.646237 5048 generic.go:334] "Generic (PLEG): container finished" podID="06c5faec-25e9-4b3d-a897-c9eaf1943687" containerID="acb47b22a6368efe0609102ba7a0c52c6e9cb1f6d5c667c4cc4cddc0caa4ad12" exitCode=0
Dec 13 07:09:47 crc kubenswrapper[5048]: I1213 07:09:47.646302 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r6hq6" event={"ID":"06c5faec-25e9-4b3d-a897-c9eaf1943687","Type":"ContainerDied","Data":"acb47b22a6368efe0609102ba7a0c52c6e9cb1f6d5c667c4cc4cddc0caa4ad12"}
Dec 13 07:09:47 crc kubenswrapper[5048]: I1213 07:09:47.648980 5048 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Dec 13 07:09:48 crc kubenswrapper[5048]: I1213 07:09:48.656487 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r6hq6" event={"ID":"06c5faec-25e9-4b3d-a897-c9eaf1943687","Type":"ContainerStarted","Data":"b560dd9ac49e172004d31f01d1c38e2d399c2925a2ea080922ae9928992892f0"}
Dec 13 07:09:49 crc kubenswrapper[5048]: I1213 07:09:49.669900 5048 generic.go:334] "Generic (PLEG): container finished" podID="06c5faec-25e9-4b3d-a897-c9eaf1943687" containerID="b560dd9ac49e172004d31f01d1c38e2d399c2925a2ea080922ae9928992892f0" exitCode=0
Dec 13 07:09:49 crc kubenswrapper[5048]: I1213 07:09:49.670064 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r6hq6" event={"ID":"06c5faec-25e9-4b3d-a897-c9eaf1943687","Type":"ContainerDied","Data":"b560dd9ac49e172004d31f01d1c38e2d399c2925a2ea080922ae9928992892f0"}
Dec 13 07:09:50 crc kubenswrapper[5048]: I1213 07:09:50.683098 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r6hq6" event={"ID":"06c5faec-25e9-4b3d-a897-c9eaf1943687","Type":"ContainerStarted","Data":"1cd77eb7949f92eeccbb23a15c8f62c491ded0eb87f87b709ff599d21e6b9a46"}
Dec 13 07:09:50 crc kubenswrapper[5048]: I1213 07:09:50.710835 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-r6hq6" podStartSLOduration=3.190647716 podStartE2EDuration="5.710815412s" podCreationTimestamp="2025-12-13 07:09:45 +0000 UTC" firstStartedPulling="2025-12-13 07:09:47.648705996 +0000 UTC m=+2421.515300577" lastFinishedPulling="2025-12-13 07:09:50.168873682 +0000 UTC m=+2424.035468273" observedRunningTime="2025-12-13 07:09:50.70525287 +0000 UTC m=+2424.571847451" watchObservedRunningTime="2025-12-13 07:09:50.710815412 +0000 UTC m=+2424.577409993"
Dec 13 07:09:53 crc kubenswrapper[5048]: I1213 07:09:53.566897 5048 scope.go:117] "RemoveContainer" containerID="13a24424a3c51e9ddc7d4af672496f5caa198f8ddf1e75b5f564a337c4d739d6"
Dec 13 07:09:53 crc kubenswrapper[5048]: E1213 07:09:53.568597 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b"
Dec 13 07:09:55 crc kubenswrapper[5048]: I1213 07:09:55.962811 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-r6hq6"
Dec 13 07:09:55 crc kubenswrapper[5048]: I1213 07:09:55.963197 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-r6hq6"
Dec 13 07:09:56 crc kubenswrapper[5048]: I1213 07:09:56.014491 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-r6hq6"
Dec 13 07:09:56 crc kubenswrapper[5048]: I1213 07:09:56.784027 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-r6hq6"
Dec 13 07:09:56 crc kubenswrapper[5048]: I1213 07:09:56.833133 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-r6hq6"]
Dec 13 07:09:58 crc kubenswrapper[5048]: I1213 07:09:58.751057 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-r6hq6" podUID="06c5faec-25e9-4b3d-a897-c9eaf1943687" containerName="registry-server" containerID="cri-o://1cd77eb7949f92eeccbb23a15c8f62c491ded0eb87f87b709ff599d21e6b9a46" gracePeriod=2
Dec 13 07:10:00 crc kubenswrapper[5048]: I1213 07:10:00.779677 5048 generic.go:334] "Generic (PLEG): container finished" podID="06c5faec-25e9-4b3d-a897-c9eaf1943687" containerID="1cd77eb7949f92eeccbb23a15c8f62c491ded0eb87f87b709ff599d21e6b9a46" exitCode=0
Dec 13 07:10:00 crc kubenswrapper[5048]: I1213 07:10:00.779715 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r6hq6" event={"ID":"06c5faec-25e9-4b3d-a897-c9eaf1943687","Type":"ContainerDied","Data":"1cd77eb7949f92eeccbb23a15c8f62c491ded0eb87f87b709ff599d21e6b9a46"}
Dec 13 07:10:01 crc kubenswrapper[5048]: I1213 07:10:01.466096 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r6hq6"
Dec 13 07:10:01 crc kubenswrapper[5048]: I1213 07:10:01.519611 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dpq8j\" (UniqueName: \"kubernetes.io/projected/06c5faec-25e9-4b3d-a897-c9eaf1943687-kube-api-access-dpq8j\") pod \"06c5faec-25e9-4b3d-a897-c9eaf1943687\" (UID: \"06c5faec-25e9-4b3d-a897-c9eaf1943687\") "
Dec 13 07:10:01 crc kubenswrapper[5048]: I1213 07:10:01.519697 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06c5faec-25e9-4b3d-a897-c9eaf1943687-utilities\") pod \"06c5faec-25e9-4b3d-a897-c9eaf1943687\" (UID: \"06c5faec-25e9-4b3d-a897-c9eaf1943687\") "
Dec 13 07:10:01 crc kubenswrapper[5048]: I1213 07:10:01.519881 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06c5faec-25e9-4b3d-a897-c9eaf1943687-catalog-content\") pod \"06c5faec-25e9-4b3d-a897-c9eaf1943687\" (UID: \"06c5faec-25e9-4b3d-a897-c9eaf1943687\") "
Dec 13 07:10:01 crc kubenswrapper[5048]: I1213 07:10:01.520855 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/06c5faec-25e9-4b3d-a897-c9eaf1943687-utilities" (OuterVolumeSpecName: "utilities") pod "06c5faec-25e9-4b3d-a897-c9eaf1943687" (UID: "06c5faec-25e9-4b3d-a897-c9eaf1943687"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 13 07:10:01 crc kubenswrapper[5048]: I1213 07:10:01.526631 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06c5faec-25e9-4b3d-a897-c9eaf1943687-kube-api-access-dpq8j" (OuterVolumeSpecName: "kube-api-access-dpq8j") pod "06c5faec-25e9-4b3d-a897-c9eaf1943687" (UID: "06c5faec-25e9-4b3d-a897-c9eaf1943687"). InnerVolumeSpecName "kube-api-access-dpq8j". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 07:10:01 crc kubenswrapper[5048]: I1213 07:10:01.621970 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dpq8j\" (UniqueName: \"kubernetes.io/projected/06c5faec-25e9-4b3d-a897-c9eaf1943687-kube-api-access-dpq8j\") on node \"crc\" DevicePath \"\""
Dec 13 07:10:01 crc kubenswrapper[5048]: I1213 07:10:01.622007 5048 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06c5faec-25e9-4b3d-a897-c9eaf1943687-utilities\") on node \"crc\" DevicePath \"\""
Dec 13 07:10:01 crc kubenswrapper[5048]: I1213 07:10:01.630982 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/06c5faec-25e9-4b3d-a897-c9eaf1943687-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "06c5faec-25e9-4b3d-a897-c9eaf1943687" (UID: "06c5faec-25e9-4b3d-a897-c9eaf1943687"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 13 07:10:01 crc kubenswrapper[5048]: I1213 07:10:01.723773 5048 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06c5faec-25e9-4b3d-a897-c9eaf1943687-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 13 07:10:01 crc kubenswrapper[5048]: I1213 07:10:01.789758 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r6hq6" event={"ID":"06c5faec-25e9-4b3d-a897-c9eaf1943687","Type":"ContainerDied","Data":"d5716172c671603ddcfae10a22288545f38ac9264c832a46ab39290ab5016dd4"}
Dec 13 07:10:01 crc kubenswrapper[5048]: I1213 07:10:01.789826 5048 scope.go:117] "RemoveContainer" containerID="1cd77eb7949f92eeccbb23a15c8f62c491ded0eb87f87b709ff599d21e6b9a46"
Dec 13 07:10:01 crc kubenswrapper[5048]: I1213 07:10:01.789833 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r6hq6"
Dec 13 07:10:01 crc kubenswrapper[5048]: I1213 07:10:01.810364 5048 scope.go:117] "RemoveContainer" containerID="b560dd9ac49e172004d31f01d1c38e2d399c2925a2ea080922ae9928992892f0"
Dec 13 07:10:01 crc kubenswrapper[5048]: I1213 07:10:01.826126 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-r6hq6"]
Dec 13 07:10:01 crc kubenswrapper[5048]: I1213 07:10:01.834191 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-r6hq6"]
Dec 13 07:10:01 crc kubenswrapper[5048]: I1213 07:10:01.858721 5048 scope.go:117] "RemoveContainer" containerID="acb47b22a6368efe0609102ba7a0c52c6e9cb1f6d5c667c4cc4cddc0caa4ad12"
Dec 13 07:10:02 crc kubenswrapper[5048]: I1213 07:10:02.578685 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="06c5faec-25e9-4b3d-a897-c9eaf1943687" path="/var/lib/kubelet/pods/06c5faec-25e9-4b3d-a897-c9eaf1943687/volumes"
Dec 13 07:10:07 crc kubenswrapper[5048]: I1213 07:10:07.567536 5048 scope.go:117] "RemoveContainer" containerID="13a24424a3c51e9ddc7d4af672496f5caa198f8ddf1e75b5f564a337c4d739d6"
Dec 13 07:10:07 crc kubenswrapper[5048]: E1213 07:10:07.568453 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b"
Dec 13 07:10:21 crc kubenswrapper[5048]: I1213 07:10:21.499097 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-xgbmz"]
Dec 13 07:10:21 crc kubenswrapper[5048]: E1213 07:10:21.502001 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06c5faec-25e9-4b3d-a897-c9eaf1943687" containerName="registry-server"
Dec 13 07:10:21 crc kubenswrapper[5048]: I1213 07:10:21.502039 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="06c5faec-25e9-4b3d-a897-c9eaf1943687" containerName="registry-server"
Dec 13 07:10:21 crc kubenswrapper[5048]: E1213 07:10:21.502085 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06c5faec-25e9-4b3d-a897-c9eaf1943687" containerName="extract-utilities"
Dec 13 07:10:21 crc kubenswrapper[5048]: I1213 07:10:21.502095 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="06c5faec-25e9-4b3d-a897-c9eaf1943687" containerName="extract-utilities"
Dec 13 07:10:21 crc kubenswrapper[5048]: E1213 07:10:21.502120 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06c5faec-25e9-4b3d-a897-c9eaf1943687" containerName="extract-content"
Dec 13 07:10:21 crc kubenswrapper[5048]: I1213 07:10:21.502130 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="06c5faec-25e9-4b3d-a897-c9eaf1943687" containerName="extract-content"
Dec 13 07:10:21 crc kubenswrapper[5048]: I1213 07:10:21.502349 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="06c5faec-25e9-4b3d-a897-c9eaf1943687" containerName="registry-server"
Dec 13 07:10:21 crc kubenswrapper[5048]: I1213 07:10:21.503998 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xgbmz"
Dec 13 07:10:21 crc kubenswrapper[5048]: I1213 07:10:21.510234 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xgbmz"]
Dec 13 07:10:21 crc kubenswrapper[5048]: I1213 07:10:21.567242 5048 scope.go:117] "RemoveContainer" containerID="13a24424a3c51e9ddc7d4af672496f5caa198f8ddf1e75b5f564a337c4d739d6"
Dec 13 07:10:21 crc kubenswrapper[5048]: E1213 07:10:21.567493 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b"
Dec 13 07:10:21 crc kubenswrapper[5048]: I1213 07:10:21.625697 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mp8mt\" (UniqueName: \"kubernetes.io/projected/2b459410-1543-46b9-b337-7c4dcddf5637-kube-api-access-mp8mt\") pod \"certified-operators-xgbmz\" (UID: \"2b459410-1543-46b9-b337-7c4dcddf5637\") " pod="openshift-marketplace/certified-operators-xgbmz"
Dec 13 07:10:21 crc kubenswrapper[5048]: I1213 07:10:21.625755 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2b459410-1543-46b9-b337-7c4dcddf5637-utilities\") pod \"certified-operators-xgbmz\" (UID: \"2b459410-1543-46b9-b337-7c4dcddf5637\") " pod="openshift-marketplace/certified-operators-xgbmz"
Dec 13 07:10:21 crc kubenswrapper[5048]: I1213 07:10:21.625856 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2b459410-1543-46b9-b337-7c4dcddf5637-catalog-content\") pod \"certified-operators-xgbmz\" (UID: \"2b459410-1543-46b9-b337-7c4dcddf5637\") " pod="openshift-marketplace/certified-operators-xgbmz"
Dec 13 07:10:21 crc kubenswrapper[5048]: I1213 07:10:21.728323 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2b459410-1543-46b9-b337-7c4dcddf5637-catalog-content\") pod \"certified-operators-xgbmz\" (UID: \"2b459410-1543-46b9-b337-7c4dcddf5637\") " pod="openshift-marketplace/certified-operators-xgbmz"
Dec 13 07:10:21 crc kubenswrapper[5048]: I1213 07:10:21.728495 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mp8mt\" (UniqueName: \"kubernetes.io/projected/2b459410-1543-46b9-b337-7c4dcddf5637-kube-api-access-mp8mt\") pod \"certified-operators-xgbmz\" (UID: \"2b459410-1543-46b9-b337-7c4dcddf5637\") " pod="openshift-marketplace/certified-operators-xgbmz"
Dec 13 07:10:21 crc kubenswrapper[5048]: I1213 07:10:21.728525 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2b459410-1543-46b9-b337-7c4dcddf5637-utilities\") pod \"certified-operators-xgbmz\" (UID: \"2b459410-1543-46b9-b337-7c4dcddf5637\") " pod="openshift-marketplace/certified-operators-xgbmz"
Dec 13 07:10:21 crc kubenswrapper[5048]: I1213 07:10:21.729110 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2b459410-1543-46b9-b337-7c4dcddf5637-catalog-content\") pod \"certified-operators-xgbmz\" (UID: \"2b459410-1543-46b9-b337-7c4dcddf5637\") " pod="openshift-marketplace/certified-operators-xgbmz"
Dec 13 07:10:21 crc kubenswrapper[5048]: I1213 07:10:21.729143 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2b459410-1543-46b9-b337-7c4dcddf5637-utilities\") pod \"certified-operators-xgbmz\" (UID: \"2b459410-1543-46b9-b337-7c4dcddf5637\") " pod="openshift-marketplace/certified-operators-xgbmz"
Dec 13 07:10:21 crc kubenswrapper[5048]: I1213 07:10:21.748599 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mp8mt\" (UniqueName: \"kubernetes.io/projected/2b459410-1543-46b9-b337-7c4dcddf5637-kube-api-access-mp8mt\") pod \"certified-operators-xgbmz\" (UID: \"2b459410-1543-46b9-b337-7c4dcddf5637\") " pod="openshift-marketplace/certified-operators-xgbmz"
Dec 13 07:10:21 crc kubenswrapper[5048]: I1213 07:10:21.872102 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xgbmz"
Dec 13 07:10:22 crc kubenswrapper[5048]: I1213 07:10:22.204994 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xgbmz"]
Dec 13 07:10:23 crc kubenswrapper[5048]: I1213 07:10:23.034589 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xgbmz" event={"ID":"2b459410-1543-46b9-b337-7c4dcddf5637","Type":"ContainerDied","Data":"2884e14510db40b09e846b0787167fdc54960b13b6a1487ebe26e48ebb7f8fd1"}
Dec 13 07:10:23 crc kubenswrapper[5048]: I1213 07:10:23.033985 5048 generic.go:334] "Generic (PLEG): container finished" podID="2b459410-1543-46b9-b337-7c4dcddf5637" containerID="2884e14510db40b09e846b0787167fdc54960b13b6a1487ebe26e48ebb7f8fd1" exitCode=0
Dec 13 07:10:23 crc kubenswrapper[5048]: I1213 07:10:23.036384 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xgbmz" event={"ID":"2b459410-1543-46b9-b337-7c4dcddf5637","Type":"ContainerStarted","Data":"eb9fee1c2d9cfbfa799453f0d6b02813a3ac174621df7987f8e79c54e78cb0e6"}
Dec 13 07:10:25 crc kubenswrapper[5048]: I1213 07:10:25.056814 5048 generic.go:334] "Generic (PLEG): container finished" podID="2b459410-1543-46b9-b337-7c4dcddf5637" containerID="7f2ceb3f0335bbde271f49e344b90c3941a4095cd58e85ca1833ed57b0e0d1f5" exitCode=0
Dec 13 07:10:25 crc kubenswrapper[5048]: I1213 07:10:25.057004 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xgbmz" event={"ID":"2b459410-1543-46b9-b337-7c4dcddf5637","Type":"ContainerDied","Data":"7f2ceb3f0335bbde271f49e344b90c3941a4095cd58e85ca1833ed57b0e0d1f5"}
Dec 13 07:10:26 crc kubenswrapper[5048]: I1213 07:10:26.071185 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xgbmz" event={"ID":"2b459410-1543-46b9-b337-7c4dcddf5637","Type":"ContainerStarted","Data":"037289d1592c4179c791901d5254f7f94aaa39eceaff30147daa8c01301782c6"}
Dec 13 07:10:26 crc kubenswrapper[5048]: I1213 07:10:26.094684 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-xgbmz" podStartSLOduration=2.651111989 podStartE2EDuration="5.094659955s" podCreationTimestamp="2025-12-13 07:10:21 +0000 UTC" firstStartedPulling="2025-12-13 07:10:23.037156454 +0000 UTC m=+2456.903751035" lastFinishedPulling="2025-12-13 07:10:25.48070442 +0000 UTC m=+2459.347299001" observedRunningTime="2025-12-13 07:10:26.090199403 +0000 UTC m=+2459.956793994" watchObservedRunningTime="2025-12-13 07:10:26.094659955 +0000 UTC m=+2459.961254536"
Dec 13 07:10:31 crc kubenswrapper[5048]: I1213 07:10:31.872816 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-xgbmz"
Dec 13 07:10:31 crc kubenswrapper[5048]: I1213 07:10:31.873814 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-xgbmz"
Dec 13 07:10:31 crc kubenswrapper[5048]: I1213 07:10:31.928616 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-xgbmz"
Dec 13 07:10:32 crc kubenswrapper[5048]: I1213 07:10:32.197665 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-xgbmz"
Dec 13 07:10:32 crc kubenswrapper[5048]: I1213 07:10:32.250666 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xgbmz"]
Dec 13 07:10:34 crc kubenswrapper[5048]: I1213 07:10:34.165033 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-xgbmz" podUID="2b459410-1543-46b9-b337-7c4dcddf5637" containerName="registry-server" containerID="cri-o://037289d1592c4179c791901d5254f7f94aaa39eceaff30147daa8c01301782c6" gracePeriod=2
Dec 13 07:10:34 crc kubenswrapper[5048]: I1213 07:10:34.620407 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xgbmz"
Dec 13 07:10:34 crc kubenswrapper[5048]: I1213 07:10:34.730967 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mp8mt\" (UniqueName: \"kubernetes.io/projected/2b459410-1543-46b9-b337-7c4dcddf5637-kube-api-access-mp8mt\") pod \"2b459410-1543-46b9-b337-7c4dcddf5637\" (UID: \"2b459410-1543-46b9-b337-7c4dcddf5637\") "
Dec 13 07:10:34 crc kubenswrapper[5048]: I1213 07:10:34.731178 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2b459410-1543-46b9-b337-7c4dcddf5637-catalog-content\") pod \"2b459410-1543-46b9-b337-7c4dcddf5637\" (UID: \"2b459410-1543-46b9-b337-7c4dcddf5637\") "
Dec 13 07:10:34 crc kubenswrapper[5048]: I1213 07:10:34.731218 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2b459410-1543-46b9-b337-7c4dcddf5637-utilities\") pod \"2b459410-1543-46b9-b337-7c4dcddf5637\" (UID: \"2b459410-1543-46b9-b337-7c4dcddf5637\") "
Dec 13 07:10:34 crc kubenswrapper[5048]: I1213 07:10:34.733098 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2b459410-1543-46b9-b337-7c4dcddf5637-utilities" (OuterVolumeSpecName: "utilities") pod "2b459410-1543-46b9-b337-7c4dcddf5637" (UID: "2b459410-1543-46b9-b337-7c4dcddf5637"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 13 07:10:34 crc kubenswrapper[5048]: I1213 07:10:34.736370 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b459410-1543-46b9-b337-7c4dcddf5637-kube-api-access-mp8mt" (OuterVolumeSpecName: "kube-api-access-mp8mt") pod "2b459410-1543-46b9-b337-7c4dcddf5637" (UID: "2b459410-1543-46b9-b337-7c4dcddf5637"). InnerVolumeSpecName "kube-api-access-mp8mt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 07:10:34 crc kubenswrapper[5048]: I1213 07:10:34.833759 5048 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2b459410-1543-46b9-b337-7c4dcddf5637-utilities\") on node \"crc\" DevicePath \"\""
Dec 13 07:10:34 crc kubenswrapper[5048]: I1213 07:10:34.833802 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mp8mt\" (UniqueName: \"kubernetes.io/projected/2b459410-1543-46b9-b337-7c4dcddf5637-kube-api-access-mp8mt\") on node \"crc\" DevicePath \"\""
Dec 13 07:10:34 crc kubenswrapper[5048]: I1213 07:10:34.962265 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2b459410-1543-46b9-b337-7c4dcddf5637-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2b459410-1543-46b9-b337-7c4dcddf5637" (UID: "2b459410-1543-46b9-b337-7c4dcddf5637"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 13 07:10:35 crc kubenswrapper[5048]: I1213 07:10:35.037462 5048 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2b459410-1543-46b9-b337-7c4dcddf5637-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 13 07:10:35 crc kubenswrapper[5048]: I1213 07:10:35.176573 5048 generic.go:334] "Generic (PLEG): container finished" podID="2b459410-1543-46b9-b337-7c4dcddf5637" containerID="037289d1592c4179c791901d5254f7f94aaa39eceaff30147daa8c01301782c6" exitCode=0
Dec 13 07:10:35 crc kubenswrapper[5048]: I1213 07:10:35.176647 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xgbmz"
Dec 13 07:10:35 crc kubenswrapper[5048]: I1213 07:10:35.176644 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xgbmz" event={"ID":"2b459410-1543-46b9-b337-7c4dcddf5637","Type":"ContainerDied","Data":"037289d1592c4179c791901d5254f7f94aaa39eceaff30147daa8c01301782c6"}
Dec 13 07:10:35 crc kubenswrapper[5048]: I1213 07:10:35.176745 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xgbmz" event={"ID":"2b459410-1543-46b9-b337-7c4dcddf5637","Type":"ContainerDied","Data":"eb9fee1c2d9cfbfa799453f0d6b02813a3ac174621df7987f8e79c54e78cb0e6"}
Dec 13 07:10:35 crc kubenswrapper[5048]: I1213 07:10:35.176767 5048 scope.go:117] "RemoveContainer" containerID="037289d1592c4179c791901d5254f7f94aaa39eceaff30147daa8c01301782c6"
Dec 13 07:10:35 crc kubenswrapper[5048]: I1213 07:10:35.198075 5048 scope.go:117] "RemoveContainer" containerID="7f2ceb3f0335bbde271f49e344b90c3941a4095cd58e85ca1833ed57b0e0d1f5"
Dec 13 07:10:35 crc kubenswrapper[5048]: I1213 07:10:35.213853 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xgbmz"]
Dec 13 07:10:35 crc kubenswrapper[5048]: I1213 07:10:35.225635 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-xgbmz"]
Dec 13 07:10:35 crc kubenswrapper[5048]: I1213 07:10:35.243338 5048 scope.go:117] "RemoveContainer" containerID="2884e14510db40b09e846b0787167fdc54960b13b6a1487ebe26e48ebb7f8fd1"
Dec 13 07:10:35 crc kubenswrapper[5048]: I1213 07:10:35.265857 5048 scope.go:117] "RemoveContainer" containerID="037289d1592c4179c791901d5254f7f94aaa39eceaff30147daa8c01301782c6"
Dec 13 07:10:35 crc kubenswrapper[5048]: E1213 07:10:35.266794 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"037289d1592c4179c791901d5254f7f94aaa39eceaff30147daa8c01301782c6\": container with ID starting with 037289d1592c4179c791901d5254f7f94aaa39eceaff30147daa8c01301782c6 not found: ID does not exist" containerID="037289d1592c4179c791901d5254f7f94aaa39eceaff30147daa8c01301782c6"
Dec 13 07:10:35 crc kubenswrapper[5048]: I1213 07:10:35.266867 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"037289d1592c4179c791901d5254f7f94aaa39eceaff30147daa8c01301782c6"} err="failed to get container status \"037289d1592c4179c791901d5254f7f94aaa39eceaff30147daa8c01301782c6\": rpc error: code = NotFound desc = could not find container \"037289d1592c4179c791901d5254f7f94aaa39eceaff30147daa8c01301782c6\": container with ID starting with 037289d1592c4179c791901d5254f7f94aaa39eceaff30147daa8c01301782c6 not found: ID does not exist"
Dec 13 07:10:35 crc kubenswrapper[5048]: I1213 07:10:35.266901 5048 scope.go:117] "RemoveContainer" containerID="7f2ceb3f0335bbde271f49e344b90c3941a4095cd58e85ca1833ed57b0e0d1f5"
Dec 13 07:10:35 crc kubenswrapper[5048]: E1213 07:10:35.267262 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7f2ceb3f0335bbde271f49e344b90c3941a4095cd58e85ca1833ed57b0e0d1f5\": container with ID starting with 7f2ceb3f0335bbde271f49e344b90c3941a4095cd58e85ca1833ed57b0e0d1f5 not found: ID does not exist" containerID="7f2ceb3f0335bbde271f49e344b90c3941a4095cd58e85ca1833ed57b0e0d1f5"
Dec 13 07:10:35 crc kubenswrapper[5048]: I1213 07:10:35.267296 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f2ceb3f0335bbde271f49e344b90c3941a4095cd58e85ca1833ed57b0e0d1f5"} err="failed to get container status \"7f2ceb3f0335bbde271f49e344b90c3941a4095cd58e85ca1833ed57b0e0d1f5\": rpc error: code = NotFound desc = could not find container \"7f2ceb3f0335bbde271f49e344b90c3941a4095cd58e85ca1833ed57b0e0d1f5\": container with ID starting with 7f2ceb3f0335bbde271f49e344b90c3941a4095cd58e85ca1833ed57b0e0d1f5 not found: ID does not exist"
Dec 13 07:10:35 crc kubenswrapper[5048]: I1213 07:10:35.267320 5048 scope.go:117] "RemoveContainer" containerID="2884e14510db40b09e846b0787167fdc54960b13b6a1487ebe26e48ebb7f8fd1"
Dec 13 07:10:35 crc kubenswrapper[5048]: E1213 07:10:35.267771 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2884e14510db40b09e846b0787167fdc54960b13b6a1487ebe26e48ebb7f8fd1\": container with ID starting with 2884e14510db40b09e846b0787167fdc54960b13b6a1487ebe26e48ebb7f8fd1 not found: ID does not exist" containerID="2884e14510db40b09e846b0787167fdc54960b13b6a1487ebe26e48ebb7f8fd1"
Dec 13 07:10:35 crc kubenswrapper[5048]: I1213 07:10:35.267793 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2884e14510db40b09e846b0787167fdc54960b13b6a1487ebe26e48ebb7f8fd1"} err="failed to get container status \"2884e14510db40b09e846b0787167fdc54960b13b6a1487ebe26e48ebb7f8fd1\": rpc error: code = NotFound desc = could not find container \"2884e14510db40b09e846b0787167fdc54960b13b6a1487ebe26e48ebb7f8fd1\": container with ID starting with 2884e14510db40b09e846b0787167fdc54960b13b6a1487ebe26e48ebb7f8fd1 not found: ID does not exist"
Dec 13 07:10:36 crc kubenswrapper[5048]: I1213 07:10:36.571962 5048 scope.go:117] "RemoveContainer" containerID="13a24424a3c51e9ddc7d4af672496f5caa198f8ddf1e75b5f564a337c4d739d6"
Dec 13 07:10:36 crc kubenswrapper[5048]: E1213 07:10:36.572610 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b"
Dec 13 07:10:36 crc kubenswrapper[5048]: I1213 07:10:36.582128 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2b459410-1543-46b9-b337-7c4dcddf5637" path="/var/lib/kubelet/pods/2b459410-1543-46b9-b337-7c4dcddf5637/volumes"
Dec 13 07:10:50 crc kubenswrapper[5048]: I1213 07:10:50.567238 5048 scope.go:117] "RemoveContainer" containerID="13a24424a3c51e9ddc7d4af672496f5caa198f8ddf1e75b5f564a337c4d739d6"
Dec 13 07:10:50 crc kubenswrapper[5048]: E1213 07:10:50.568166 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b"
Dec 13 07:11:05 crc kubenswrapper[5048]: I1213 07:11:05.566504 5048 scope.go:117] "RemoveContainer" containerID="13a24424a3c51e9ddc7d4af672496f5caa198f8ddf1e75b5f564a337c4d739d6"
Dec 13 07:11:05 crc kubenswrapper[5048]: E1213 07:11:05.567364 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b"
Dec 13 07:11:17 crc kubenswrapper[5048]: I1213 07:11:17.567242 5048 scope.go:117] "RemoveContainer" containerID="13a24424a3c51e9ddc7d4af672496f5caa198f8ddf1e75b5f564a337c4d739d6"
Dec 13 07:11:17 crc kubenswrapper[5048]: E1213 07:11:17.568053 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b"
Dec 13 07:11:26 crc kubenswrapper[5048]: I1213 07:11:26.768304 5048 generic.go:334] "Generic (PLEG): container finished" podID="482402ba-adeb-4175-911a-2ab863e44d4e" containerID="12fe4ef85bd88027ced980cc2a19a1aec21804f45031346ceff0845904857d63" exitCode=0
Dec 13 07:11:26 crc kubenswrapper[5048]: I1213 07:11:26.768389 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-47k9k" event={"ID":"482402ba-adeb-4175-911a-2ab863e44d4e","Type":"ContainerDied","Data":"12fe4ef85bd88027ced980cc2a19a1aec21804f45031346ceff0845904857d63"}
Dec 13 07:11:28 crc kubenswrapper[5048]: I1213 07:11:28.313009 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-47k9k"
Dec 13 07:11:28 crc kubenswrapper[5048]: I1213 07:11:28.425371 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/482402ba-adeb-4175-911a-2ab863e44d4e-libvirt-secret-0\") pod \"482402ba-adeb-4175-911a-2ab863e44d4e\" (UID: \"482402ba-adeb-4175-911a-2ab863e44d4e\") "
Dec 13 07:11:28 crc kubenswrapper[5048]: I1213 07:11:28.425465 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/482402ba-adeb-4175-911a-2ab863e44d4e-libvirt-combined-ca-bundle\") pod \"482402ba-adeb-4175-911a-2ab863e44d4e\" (UID: \"482402ba-adeb-4175-911a-2ab863e44d4e\") "
Dec 13 07:11:28 crc kubenswrapper[5048]: I1213 07:11:28.425505 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6dqmx\" (UniqueName: \"kubernetes.io/projected/482402ba-adeb-4175-911a-2ab863e44d4e-kube-api-access-6dqmx\") pod \"482402ba-adeb-4175-911a-2ab863e44d4e\" (UID: \"482402ba-adeb-4175-911a-2ab863e44d4e\") "
Dec 13 07:11:28 crc kubenswrapper[5048]: I1213 07:11:28.425693 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/482402ba-adeb-4175-911a-2ab863e44d4e-ssh-key\") pod \"482402ba-adeb-4175-911a-2ab863e44d4e\" (UID: \"482402ba-adeb-4175-911a-2ab863e44d4e\") "
Dec 13 07:11:28 crc kubenswrapper[5048]: I1213 07:11:28.425782 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/482402ba-adeb-4175-911a-2ab863e44d4e-inventory\") pod \"482402ba-adeb-4175-911a-2ab863e44d4e\" (UID: \"482402ba-adeb-4175-911a-2ab863e44d4e\") "
Dec 13 07:11:28 crc kubenswrapper[5048]: I1213 07:11:28.431530 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/482402ba-adeb-4175-911a-2ab863e44d4e-kube-api-access-6dqmx" (OuterVolumeSpecName: "kube-api-access-6dqmx") pod "482402ba-adeb-4175-911a-2ab863e44d4e" (UID: "482402ba-adeb-4175-911a-2ab863e44d4e"). InnerVolumeSpecName "kube-api-access-6dqmx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 07:11:28 crc kubenswrapper[5048]: I1213 07:11:28.435265 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/482402ba-adeb-4175-911a-2ab863e44d4e-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "482402ba-adeb-4175-911a-2ab863e44d4e" (UID: "482402ba-adeb-4175-911a-2ab863e44d4e"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 07:11:28 crc kubenswrapper[5048]: I1213 07:11:28.452828 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/482402ba-adeb-4175-911a-2ab863e44d4e-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "482402ba-adeb-4175-911a-2ab863e44d4e" (UID: "482402ba-adeb-4175-911a-2ab863e44d4e"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 07:11:28 crc kubenswrapper[5048]: I1213 07:11:28.455712 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/482402ba-adeb-4175-911a-2ab863e44d4e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "482402ba-adeb-4175-911a-2ab863e44d4e" (UID: "482402ba-adeb-4175-911a-2ab863e44d4e"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 07:11:28 crc kubenswrapper[5048]: I1213 07:11:28.457403 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/482402ba-adeb-4175-911a-2ab863e44d4e-inventory" (OuterVolumeSpecName: "inventory") pod "482402ba-adeb-4175-911a-2ab863e44d4e" (UID: "482402ba-adeb-4175-911a-2ab863e44d4e"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 07:11:28 crc kubenswrapper[5048]: I1213 07:11:28.528729 5048 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/482402ba-adeb-4175-911a-2ab863e44d4e-ssh-key\") on node \"crc\" DevicePath \"\""
Dec 13 07:11:28 crc kubenswrapper[5048]: I1213 07:11:28.528802 5048 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/482402ba-adeb-4175-911a-2ab863e44d4e-inventory\") on node \"crc\" DevicePath \"\""
Dec 13 07:11:28 crc kubenswrapper[5048]: I1213 07:11:28.528824 5048 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/482402ba-adeb-4175-911a-2ab863e44d4e-libvirt-secret-0\") on node \"crc\" DevicePath \"\""
Dec 13 07:11:28 crc kubenswrapper[5048]: I1213 07:11:28.528838 5048 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/482402ba-adeb-4175-911a-2ab863e44d4e-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 13 07:11:28 crc kubenswrapper[5048]: I1213 07:11:28.528852 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6dqmx\" (UniqueName: \"kubernetes.io/projected/482402ba-adeb-4175-911a-2ab863e44d4e-kube-api-access-6dqmx\") on node \"crc\" DevicePath \"\""
Dec 13 07:11:28 crc kubenswrapper[5048]: I1213 07:11:28.786627 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-47k9k" event={"ID":"482402ba-adeb-4175-911a-2ab863e44d4e","Type":"ContainerDied","Data":"4e8d64f62348f60791f05567cb49942a25d14938c18ebfacc51a58b983f681a5"}
Dec 13 07:11:28 crc kubenswrapper[5048]: I1213 07:11:28.786697 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4e8d64f62348f60791f05567cb49942a25d14938c18ebfacc51a58b983f681a5"
Dec 13 07:11:28 crc kubenswrapper[5048]: I1213 07:11:28.786777 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-47k9k"
Dec 13 07:11:28 crc kubenswrapper[5048]: I1213 07:11:28.885961 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx"]
Dec 13 07:11:28 crc kubenswrapper[5048]: E1213 07:11:28.886491 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="482402ba-adeb-4175-911a-2ab863e44d4e" containerName="libvirt-edpm-deployment-openstack-edpm-ipam"
Dec 13 07:11:28 crc kubenswrapper[5048]: I1213 07:11:28.886515 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="482402ba-adeb-4175-911a-2ab863e44d4e" containerName="libvirt-edpm-deployment-openstack-edpm-ipam"
Dec 13 07:11:28 crc kubenswrapper[5048]: E1213 07:11:28.886542 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b459410-1543-46b9-b337-7c4dcddf5637" containerName="extract-utilities"
Dec 13 07:11:28 crc kubenswrapper[5048]: I1213 07:11:28.886552 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b459410-1543-46b9-b337-7c4dcddf5637" containerName="extract-utilities"
Dec 13 07:11:28 crc kubenswrapper[5048]: E1213 07:11:28.886568 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b459410-1543-46b9-b337-7c4dcddf5637" containerName="registry-server"
Dec 13 07:11:28 crc kubenswrapper[5048]: I1213 07:11:28.886578 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b459410-1543-46b9-b337-7c4dcddf5637" containerName="registry-server"
Dec 13 07:11:28 crc kubenswrapper[5048]: E1213 07:11:28.886594 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b459410-1543-46b9-b337-7c4dcddf5637" containerName="extract-content"
Dec 13 07:11:28 crc kubenswrapper[5048]: I1213 07:11:28.886604 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b459410-1543-46b9-b337-7c4dcddf5637" containerName="extract-content"
Dec 13 07:11:28 crc kubenswrapper[5048]: I1213 07:11:28.886821 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="482402ba-adeb-4175-911a-2ab863e44d4e" containerName="libvirt-edpm-deployment-openstack-edpm-ipam"
Dec 13 07:11:28 crc kubenswrapper[5048]: I1213 07:11:28.886850 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b459410-1543-46b9-b337-7c4dcddf5637" containerName="registry-server"
Dec 13 07:11:28 crc kubenswrapper[5048]: I1213 07:11:28.887629 5048 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx" Dec 13 07:11:28 crc kubenswrapper[5048]: I1213 07:11:28.893662 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Dec 13 07:11:28 crc kubenswrapper[5048]: I1213 07:11:28.893692 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Dec 13 07:11:28 crc kubenswrapper[5048]: I1213 07:11:28.893864 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 13 07:11:28 crc kubenswrapper[5048]: I1213 07:11:28.893957 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 13 07:11:28 crc kubenswrapper[5048]: I1213 07:11:28.893990 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 13 07:11:28 crc kubenswrapper[5048]: I1213 07:11:28.893662 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Dec 13 07:11:28 crc kubenswrapper[5048]: I1213 07:11:28.894069 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hgp7p" Dec 13 07:11:28 crc kubenswrapper[5048]: I1213 07:11:28.918227 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx"] Dec 13 07:11:29 crc kubenswrapper[5048]: I1213 07:11:29.038146 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-flsxx\" (UID: \"e460e258-aa7f-4839-9443-50b9afe4557b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx" Dec 13 07:11:29 crc kubenswrapper[5048]: I1213 07:11:29.038206 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-flsxx\" (UID: \"e460e258-aa7f-4839-9443-50b9afe4557b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx" Dec 13 07:11:29 crc kubenswrapper[5048]: I1213 07:11:29.038459 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-flsxx\" (UID: \"e460e258-aa7f-4839-9443-50b9afe4557b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx" Dec 13 07:11:29 crc kubenswrapper[5048]: I1213 07:11:29.038552 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-flsxx\" (UID: \"e460e258-aa7f-4839-9443-50b9afe4557b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx" Dec 13 07:11:29 crc kubenswrapper[5048]: I1213 07:11:29.038613 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: 
\"kubernetes.io/configmap/e460e258-aa7f-4839-9443-50b9afe4557b-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-flsxx\" (UID: \"e460e258-aa7f-4839-9443-50b9afe4557b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx" Dec 13 07:11:29 crc kubenswrapper[5048]: I1213 07:11:29.038675 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-flsxx\" (UID: \"e460e258-aa7f-4839-9443-50b9afe4557b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx" Dec 13 07:11:29 crc kubenswrapper[5048]: I1213 07:11:29.038771 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8wbt7\" (UniqueName: \"kubernetes.io/projected/e460e258-aa7f-4839-9443-50b9afe4557b-kube-api-access-8wbt7\") pod \"nova-edpm-deployment-openstack-edpm-ipam-flsxx\" (UID: \"e460e258-aa7f-4839-9443-50b9afe4557b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx" Dec 13 07:11:29 crc kubenswrapper[5048]: I1213 07:11:29.038854 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-flsxx\" (UID: \"e460e258-aa7f-4839-9443-50b9afe4557b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx" Dec 13 07:11:29 crc kubenswrapper[5048]: I1213 07:11:29.038957 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-flsxx\" (UID: \"e460e258-aa7f-4839-9443-50b9afe4557b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx" Dec 13 07:11:29 crc kubenswrapper[5048]: I1213 07:11:29.140623 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-flsxx\" (UID: \"e460e258-aa7f-4839-9443-50b9afe4557b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx" Dec 13 07:11:29 crc kubenswrapper[5048]: I1213 07:11:29.140769 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-flsxx\" (UID: \"e460e258-aa7f-4839-9443-50b9afe4557b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx" Dec 13 07:11:29 crc kubenswrapper[5048]: I1213 07:11:29.140813 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/e460e258-aa7f-4839-9443-50b9afe4557b-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-flsxx\" (UID: \"e460e258-aa7f-4839-9443-50b9afe4557b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx" Dec 13 07:11:29 crc kubenswrapper[5048]: I1213 07:11:29.140838 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-flsxx\" (UID: \"e460e258-aa7f-4839-9443-50b9afe4557b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx" Dec 13 07:11:29 crc kubenswrapper[5048]: I1213 07:11:29.140885 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8wbt7\" (UniqueName: \"kubernetes.io/projected/e460e258-aa7f-4839-9443-50b9afe4557b-kube-api-access-8wbt7\") pod \"nova-edpm-deployment-openstack-edpm-ipam-flsxx\" (UID: \"e460e258-aa7f-4839-9443-50b9afe4557b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx" Dec 13 07:11:29 crc kubenswrapper[5048]: I1213 07:11:29.140922 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-flsxx\" (UID: \"e460e258-aa7f-4839-9443-50b9afe4557b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx" Dec 13 07:11:29 crc kubenswrapper[5048]: I1213 07:11:29.140965 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-flsxx\" (UID: \"e460e258-aa7f-4839-9443-50b9afe4557b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx" Dec 13 07:11:29 crc kubenswrapper[5048]: I1213 07:11:29.141016 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-flsxx\" (UID: \"e460e258-aa7f-4839-9443-50b9afe4557b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx" Dec 13 07:11:29 crc kubenswrapper[5048]: I1213 07:11:29.141046 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-flsxx\" (UID: \"e460e258-aa7f-4839-9443-50b9afe4557b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx" Dec 13 07:11:29 crc kubenswrapper[5048]: I1213 07:11:29.142397 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/e460e258-aa7f-4839-9443-50b9afe4557b-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-flsxx\" (UID: \"e460e258-aa7f-4839-9443-50b9afe4557b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx" Dec 13 07:11:29 crc kubenswrapper[5048]: I1213 07:11:29.144907 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-flsxx\" (UID: \"e460e258-aa7f-4839-9443-50b9afe4557b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx" Dec 13 07:11:29 crc kubenswrapper[5048]: I1213 07:11:29.144907 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: 
\"kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-flsxx\" (UID: \"e460e258-aa7f-4839-9443-50b9afe4557b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx" Dec 13 07:11:29 crc kubenswrapper[5048]: I1213 07:11:29.144907 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-flsxx\" (UID: \"e460e258-aa7f-4839-9443-50b9afe4557b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx" Dec 13 07:11:29 crc kubenswrapper[5048]: I1213 07:11:29.145377 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-flsxx\" (UID: \"e460e258-aa7f-4839-9443-50b9afe4557b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx" Dec 13 07:11:29 crc kubenswrapper[5048]: I1213 07:11:29.145849 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-flsxx\" (UID: \"e460e258-aa7f-4839-9443-50b9afe4557b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx" Dec 13 07:11:29 crc kubenswrapper[5048]: I1213 07:11:29.147519 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-flsxx\" (UID: \"e460e258-aa7f-4839-9443-50b9afe4557b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx" Dec 13 07:11:29 crc kubenswrapper[5048]: I1213 07:11:29.148214 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-flsxx\" (UID: \"e460e258-aa7f-4839-9443-50b9afe4557b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx" Dec 13 07:11:29 crc kubenswrapper[5048]: I1213 07:11:29.162601 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8wbt7\" (UniqueName: \"kubernetes.io/projected/e460e258-aa7f-4839-9443-50b9afe4557b-kube-api-access-8wbt7\") pod \"nova-edpm-deployment-openstack-edpm-ipam-flsxx\" (UID: \"e460e258-aa7f-4839-9443-50b9afe4557b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx" Dec 13 07:11:29 crc kubenswrapper[5048]: I1213 07:11:29.228568 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx" Dec 13 07:11:29 crc kubenswrapper[5048]: I1213 07:11:29.567049 5048 scope.go:117] "RemoveContainer" containerID="13a24424a3c51e9ddc7d4af672496f5caa198f8ddf1e75b5f564a337c4d739d6" Dec 13 07:11:29 crc kubenswrapper[5048]: E1213 07:11:29.568233 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:11:29 crc kubenswrapper[5048]: I1213 07:11:29.763960 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx"] Dec 13 07:11:29 crc kubenswrapper[5048]: I1213 07:11:29.796507 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx" event={"ID":"e460e258-aa7f-4839-9443-50b9afe4557b","Type":"ContainerStarted","Data":"62487e3cf11dff98500309b9aad1f4331f64694942d400668e06a5874d222d32"} Dec 13 07:11:30 crc kubenswrapper[5048]: I1213 07:11:30.807036 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx" event={"ID":"e460e258-aa7f-4839-9443-50b9afe4557b","Type":"ContainerStarted","Data":"48774b2837e7e6f906bea7feb88b7a3a1c3cee09d0b5a3c01f26585857955dd5"} Dec 13 07:11:30 crc kubenswrapper[5048]: I1213 07:11:30.828541 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx" podStartSLOduration=2.32951409 podStartE2EDuration="2.828525977s" podCreationTimestamp="2025-12-13 07:11:28 +0000 UTC" firstStartedPulling="2025-12-13 07:11:29.766552207 +0000 UTC m=+2523.633146788" lastFinishedPulling="2025-12-13 07:11:30.265564084 +0000 UTC m=+2524.132158675" observedRunningTime="2025-12-13 07:11:30.82424151 +0000 UTC m=+2524.690836111" watchObservedRunningTime="2025-12-13 07:11:30.828525977 +0000 UTC m=+2524.695120558" Dec 13 07:11:42 crc kubenswrapper[5048]: I1213 07:11:42.567016 5048 scope.go:117] "RemoveContainer" containerID="13a24424a3c51e9ddc7d4af672496f5caa198f8ddf1e75b5f564a337c4d739d6" Dec 13 07:11:42 crc kubenswrapper[5048]: E1213 07:11:42.567962 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:11:57 crc kubenswrapper[5048]: I1213 07:11:57.567396 5048 scope.go:117] "RemoveContainer" containerID="13a24424a3c51e9ddc7d4af672496f5caa198f8ddf1e75b5f564a337c4d739d6" Dec 13 07:11:57 crc kubenswrapper[5048]: E1213 07:11:57.568189 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" 
podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:12:10 crc kubenswrapper[5048]: I1213 07:12:10.567418 5048 scope.go:117] "RemoveContainer" containerID="13a24424a3c51e9ddc7d4af672496f5caa198f8ddf1e75b5f564a337c4d739d6" Dec 13 07:12:10 crc kubenswrapper[5048]: E1213 07:12:10.568450 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:12:24 crc kubenswrapper[5048]: I1213 07:12:24.566892 5048 scope.go:117] "RemoveContainer" containerID="13a24424a3c51e9ddc7d4af672496f5caa198f8ddf1e75b5f564a337c4d739d6" Dec 13 07:12:24 crc kubenswrapper[5048]: E1213 07:12:24.567681 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:12:35 crc kubenswrapper[5048]: I1213 07:12:35.567247 5048 scope.go:117] "RemoveContainer" containerID="13a24424a3c51e9ddc7d4af672496f5caa198f8ddf1e75b5f564a337c4d739d6" Dec 13 07:12:35 crc kubenswrapper[5048]: E1213 07:12:35.569142 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:12:50 crc kubenswrapper[5048]: I1213 07:12:50.566611 5048 scope.go:117] "RemoveContainer" containerID="13a24424a3c51e9ddc7d4af672496f5caa198f8ddf1e75b5f564a337c4d739d6" Dec 13 07:12:50 crc kubenswrapper[5048]: E1213 07:12:50.568467 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:13:03 crc kubenswrapper[5048]: I1213 07:13:03.566816 5048 scope.go:117] "RemoveContainer" containerID="13a24424a3c51e9ddc7d4af672496f5caa198f8ddf1e75b5f564a337c4d739d6" Dec 13 07:13:03 crc kubenswrapper[5048]: E1213 07:13:03.567590 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:13:15 crc kubenswrapper[5048]: I1213 07:13:15.567181 5048 scope.go:117] "RemoveContainer" 
containerID="13a24424a3c51e9ddc7d4af672496f5caa198f8ddf1e75b5f564a337c4d739d6" Dec 13 07:13:15 crc kubenswrapper[5048]: E1213 07:13:15.567997 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:13:28 crc kubenswrapper[5048]: I1213 07:13:28.567770 5048 scope.go:117] "RemoveContainer" containerID="13a24424a3c51e9ddc7d4af672496f5caa198f8ddf1e75b5f564a337c4d739d6" Dec 13 07:13:28 crc kubenswrapper[5048]: I1213 07:13:28.977324 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" event={"ID":"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b","Type":"ContainerStarted","Data":"8a03ae6bfe080367b968888a6b5a1193924bceab28a806b9d3c7ee5f1f18acb6"} Dec 13 07:14:19 crc kubenswrapper[5048]: I1213 07:14:19.477957 5048 generic.go:334] "Generic (PLEG): container finished" podID="e460e258-aa7f-4839-9443-50b9afe4557b" containerID="48774b2837e7e6f906bea7feb88b7a3a1c3cee09d0b5a3c01f26585857955dd5" exitCode=0 Dec 13 07:14:19 crc kubenswrapper[5048]: I1213 07:14:19.478042 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx" event={"ID":"e460e258-aa7f-4839-9443-50b9afe4557b","Type":"ContainerDied","Data":"48774b2837e7e6f906bea7feb88b7a3a1c3cee09d0b5a3c01f26585857955dd5"} Dec 13 07:14:20 crc kubenswrapper[5048]: I1213 07:14:20.902467 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx" Dec 13 07:14:20 crc kubenswrapper[5048]: I1213 07:14:20.996477 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-nova-cell1-compute-config-1\") pod \"e460e258-aa7f-4839-9443-50b9afe4557b\" (UID: \"e460e258-aa7f-4839-9443-50b9afe4557b\") " Dec 13 07:14:20 crc kubenswrapper[5048]: I1213 07:14:20.996541 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-nova-migration-ssh-key-0\") pod \"e460e258-aa7f-4839-9443-50b9afe4557b\" (UID: \"e460e258-aa7f-4839-9443-50b9afe4557b\") " Dec 13 07:14:20 crc kubenswrapper[5048]: I1213 07:14:20.996565 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-inventory\") pod \"e460e258-aa7f-4839-9443-50b9afe4557b\" (UID: \"e460e258-aa7f-4839-9443-50b9afe4557b\") " Dec 13 07:14:20 crc kubenswrapper[5048]: I1213 07:14:20.996598 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8wbt7\" (UniqueName: \"kubernetes.io/projected/e460e258-aa7f-4839-9443-50b9afe4557b-kube-api-access-8wbt7\") pod \"e460e258-aa7f-4839-9443-50b9afe4557b\" (UID: \"e460e258-aa7f-4839-9443-50b9afe4557b\") " Dec 13 07:14:20 crc kubenswrapper[5048]: I1213 07:14:20.996640 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-nova-cell1-compute-config-0\") pod \"e460e258-aa7f-4839-9443-50b9afe4557b\" (UID: \"e460e258-aa7f-4839-9443-50b9afe4557b\") " Dec 13 07:14:20 crc kubenswrapper[5048]: I1213 07:14:20.997255 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-ssh-key\") pod \"e460e258-aa7f-4839-9443-50b9afe4557b\" (UID: \"e460e258-aa7f-4839-9443-50b9afe4557b\") " Dec 13 07:14:20 crc kubenswrapper[5048]: I1213 07:14:20.997411 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-nova-migration-ssh-key-1\") pod \"e460e258-aa7f-4839-9443-50b9afe4557b\" (UID: \"e460e258-aa7f-4839-9443-50b9afe4557b\") " Dec 13 07:14:20 crc kubenswrapper[5048]: I1213 07:14:20.997731 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-nova-combined-ca-bundle\") pod \"e460e258-aa7f-4839-9443-50b9afe4557b\" (UID: \"e460e258-aa7f-4839-9443-50b9afe4557b\") " Dec 13 07:14:20 crc kubenswrapper[5048]: I1213 07:14:20.997773 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/e460e258-aa7f-4839-9443-50b9afe4557b-nova-extra-config-0\") pod \"e460e258-aa7f-4839-9443-50b9afe4557b\" (UID: \"e460e258-aa7f-4839-9443-50b9afe4557b\") " Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.003505 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "e460e258-aa7f-4839-9443-50b9afe4557b" (UID: "e460e258-aa7f-4839-9443-50b9afe4557b"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.015890 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e460e258-aa7f-4839-9443-50b9afe4557b-kube-api-access-8wbt7" (OuterVolumeSpecName: "kube-api-access-8wbt7") pod "e460e258-aa7f-4839-9443-50b9afe4557b" (UID: "e460e258-aa7f-4839-9443-50b9afe4557b"). InnerVolumeSpecName "kube-api-access-8wbt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.024859 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e460e258-aa7f-4839-9443-50b9afe4557b-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "e460e258-aa7f-4839-9443-50b9afe4557b" (UID: "e460e258-aa7f-4839-9443-50b9afe4557b"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.032711 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "e460e258-aa7f-4839-9443-50b9afe4557b" (UID: "e460e258-aa7f-4839-9443-50b9afe4557b"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.033902 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "e460e258-aa7f-4839-9443-50b9afe4557b" (UID: "e460e258-aa7f-4839-9443-50b9afe4557b"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.037345 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-inventory" (OuterVolumeSpecName: "inventory") pod "e460e258-aa7f-4839-9443-50b9afe4557b" (UID: "e460e258-aa7f-4839-9443-50b9afe4557b"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.036914 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "e460e258-aa7f-4839-9443-50b9afe4557b" (UID: "e460e258-aa7f-4839-9443-50b9afe4557b"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.039320 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "e460e258-aa7f-4839-9443-50b9afe4557b" (UID: "e460e258-aa7f-4839-9443-50b9afe4557b"). InnerVolumeSpecName "nova-cell1-compute-config-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.055482 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "e460e258-aa7f-4839-9443-50b9afe4557b" (UID: "e460e258-aa7f-4839-9443-50b9afe4557b"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.100057 5048 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.100086 5048 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.100096 5048 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-inventory\") on node \"crc\" DevicePath \"\"" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.100105 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8wbt7\" (UniqueName: \"kubernetes.io/projected/e460e258-aa7f-4839-9443-50b9afe4557b-kube-api-access-8wbt7\") on node \"crc\" DevicePath \"\"" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.100116 5048 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.100124 5048 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.100135 5048 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.100144 5048 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e460e258-aa7f-4839-9443-50b9afe4557b-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.100153 5048 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/e460e258-aa7f-4839-9443-50b9afe4557b-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.504172 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx" event={"ID":"e460e258-aa7f-4839-9443-50b9afe4557b","Type":"ContainerDied","Data":"62487e3cf11dff98500309b9aad1f4331f64694942d400668e06a5874d222d32"} Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.504486 5048 pod_container_deletor.go:80] "Container not found in pod's containers" 
containerID="62487e3cf11dff98500309b9aad1f4331f64694942d400668e06a5874d222d32" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.504677 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-flsxx" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.595821 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf"] Dec 13 07:14:21 crc kubenswrapper[5048]: E1213 07:14:21.596408 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e460e258-aa7f-4839-9443-50b9afe4557b" containerName="nova-edpm-deployment-openstack-edpm-ipam" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.596446 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="e460e258-aa7f-4839-9443-50b9afe4557b" containerName="nova-edpm-deployment-openstack-edpm-ipam" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.596713 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="e460e258-aa7f-4839-9443-50b9afe4557b" containerName="nova-edpm-deployment-openstack-edpm-ipam" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.597652 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.599992 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.600278 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hgp7p" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.600524 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.602468 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.602672 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.611279 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf"] Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.714097 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/13b74976-c0e2-4461-a564-de6ce88aa549-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf\" (UID: \"13b74976-c0e2-4461-a564-de6ce88aa549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.714193 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/13b74976-c0e2-4461-a564-de6ce88aa549-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf\" (UID: \"13b74976-c0e2-4461-a564-de6ce88aa549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.714261 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/13b74976-c0e2-4461-a564-de6ce88aa549-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf\" (UID: \"13b74976-c0e2-4461-a564-de6ce88aa549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.714305 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13b74976-c0e2-4461-a564-de6ce88aa549-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf\" (UID: \"13b74976-c0e2-4461-a564-de6ce88aa549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.714352 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qbd42\" (UniqueName: \"kubernetes.io/projected/13b74976-c0e2-4461-a564-de6ce88aa549-kube-api-access-qbd42\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf\" (UID: \"13b74976-c0e2-4461-a564-de6ce88aa549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.714381 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/13b74976-c0e2-4461-a564-de6ce88aa549-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf\" (UID: \"13b74976-c0e2-4461-a564-de6ce88aa549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.714401 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/13b74976-c0e2-4461-a564-de6ce88aa549-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf\" (UID: \"13b74976-c0e2-4461-a564-de6ce88aa549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.816482 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qbd42\" (UniqueName: \"kubernetes.io/projected/13b74976-c0e2-4461-a564-de6ce88aa549-kube-api-access-qbd42\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf\" (UID: \"13b74976-c0e2-4461-a564-de6ce88aa549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.816554 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/13b74976-c0e2-4461-a564-de6ce88aa549-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf\" (UID: \"13b74976-c0e2-4461-a564-de6ce88aa549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.816586 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/13b74976-c0e2-4461-a564-de6ce88aa549-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf\" (UID: \"13b74976-c0e2-4461-a564-de6ce88aa549\") " 
pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.816625 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/13b74976-c0e2-4461-a564-de6ce88aa549-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf\" (UID: \"13b74976-c0e2-4461-a564-de6ce88aa549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.816671 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/13b74976-c0e2-4461-a564-de6ce88aa549-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf\" (UID: \"13b74976-c0e2-4461-a564-de6ce88aa549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.816752 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/13b74976-c0e2-4461-a564-de6ce88aa549-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf\" (UID: \"13b74976-c0e2-4461-a564-de6ce88aa549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.816793 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13b74976-c0e2-4461-a564-de6ce88aa549-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf\" (UID: \"13b74976-c0e2-4461-a564-de6ce88aa549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.820826 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/13b74976-c0e2-4461-a564-de6ce88aa549-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf\" (UID: \"13b74976-c0e2-4461-a564-de6ce88aa549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.821069 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/13b74976-c0e2-4461-a564-de6ce88aa549-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf\" (UID: \"13b74976-c0e2-4461-a564-de6ce88aa549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.823320 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/13b74976-c0e2-4461-a564-de6ce88aa549-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf\" (UID: \"13b74976-c0e2-4461-a564-de6ce88aa549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.834833 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13b74976-c0e2-4461-a564-de6ce88aa549-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf\" (UID: \"13b74976-c0e2-4461-a564-de6ce88aa549\") " 
pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.837498 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qbd42\" (UniqueName: \"kubernetes.io/projected/13b74976-c0e2-4461-a564-de6ce88aa549-kube-api-access-qbd42\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf\" (UID: \"13b74976-c0e2-4461-a564-de6ce88aa549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.843778 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/13b74976-c0e2-4461-a564-de6ce88aa549-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf\" (UID: \"13b74976-c0e2-4461-a564-de6ce88aa549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.846901 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/13b74976-c0e2-4461-a564-de6ce88aa549-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf\" (UID: \"13b74976-c0e2-4461-a564-de6ce88aa549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf" Dec 13 07:14:21 crc kubenswrapper[5048]: I1213 07:14:21.931719 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf" Dec 13 07:14:22 crc kubenswrapper[5048]: I1213 07:14:22.596230 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf"] Dec 13 07:14:23 crc kubenswrapper[5048]: I1213 07:14:23.568802 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf" event={"ID":"13b74976-c0e2-4461-a564-de6ce88aa549","Type":"ContainerStarted","Data":"23d57d061838dbf14fe02d43beef807cdc92971c18b2fc00547fb3ebe7a01aae"} Dec 13 07:14:24 crc kubenswrapper[5048]: I1213 07:14:24.578140 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf" event={"ID":"13b74976-c0e2-4461-a564-de6ce88aa549","Type":"ContainerStarted","Data":"64441a4fcc3abfe340917d485f4b60ba3372dcc78c24d845b8521e26dac1ca88"} Dec 13 07:14:24 crc kubenswrapper[5048]: I1213 07:14:24.599811 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf" podStartSLOduration=3.140176029 podStartE2EDuration="3.599792105s" podCreationTimestamp="2025-12-13 07:14:21 +0000 UTC" firstStartedPulling="2025-12-13 07:14:22.599526807 +0000 UTC m=+2696.466121388" lastFinishedPulling="2025-12-13 07:14:23.059142883 +0000 UTC m=+2696.925737464" observedRunningTime="2025-12-13 07:14:24.596013614 +0000 UTC m=+2698.462608205" watchObservedRunningTime="2025-12-13 07:14:24.599792105 +0000 UTC m=+2698.466386686" Dec 13 07:15:00 crc kubenswrapper[5048]: I1213 07:15:00.169124 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29426835-54jqz"] Dec 13 07:15:00 crc kubenswrapper[5048]: I1213 07:15:00.171349 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29426835-54jqz"
Dec 13 07:15:00 crc kubenswrapper[5048]: I1213 07:15:00.174553 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Dec 13 07:15:00 crc kubenswrapper[5048]: I1213 07:15:00.174801 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Dec 13 07:15:00 crc kubenswrapper[5048]: I1213 07:15:00.190568 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29426835-54jqz"]
Dec 13 07:15:00 crc kubenswrapper[5048]: I1213 07:15:00.248243 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/11969d0d-33d2-41bd-9829-1b0a73458e96-config-volume\") pod \"collect-profiles-29426835-54jqz\" (UID: \"11969d0d-33d2-41bd-9829-1b0a73458e96\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426835-54jqz"
Dec 13 07:15:00 crc kubenswrapper[5048]: I1213 07:15:00.248290 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/11969d0d-33d2-41bd-9829-1b0a73458e96-secret-volume\") pod \"collect-profiles-29426835-54jqz\" (UID: \"11969d0d-33d2-41bd-9829-1b0a73458e96\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426835-54jqz"
Dec 13 07:15:00 crc kubenswrapper[5048]: I1213 07:15:00.248358 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9hg9n\" (UniqueName: \"kubernetes.io/projected/11969d0d-33d2-41bd-9829-1b0a73458e96-kube-api-access-9hg9n\") pod \"collect-profiles-29426835-54jqz\" (UID: \"11969d0d-33d2-41bd-9829-1b0a73458e96\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426835-54jqz"
Dec 13 07:15:00 crc kubenswrapper[5048]: I1213 07:15:00.349898 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/11969d0d-33d2-41bd-9829-1b0a73458e96-secret-volume\") pod \"collect-profiles-29426835-54jqz\" (UID: \"11969d0d-33d2-41bd-9829-1b0a73458e96\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426835-54jqz"
Dec 13 07:15:00 crc kubenswrapper[5048]: I1213 07:15:00.350011 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9hg9n\" (UniqueName: \"kubernetes.io/projected/11969d0d-33d2-41bd-9829-1b0a73458e96-kube-api-access-9hg9n\") pod \"collect-profiles-29426835-54jqz\" (UID: \"11969d0d-33d2-41bd-9829-1b0a73458e96\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426835-54jqz"
Dec 13 07:15:00 crc kubenswrapper[5048]: I1213 07:15:00.350188 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/11969d0d-33d2-41bd-9829-1b0a73458e96-config-volume\") pod \"collect-profiles-29426835-54jqz\" (UID: \"11969d0d-33d2-41bd-9829-1b0a73458e96\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426835-54jqz"
Dec 13 07:15:00 crc kubenswrapper[5048]: I1213 07:15:00.351341 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/11969d0d-33d2-41bd-9829-1b0a73458e96-config-volume\") pod \"collect-profiles-29426835-54jqz\" (UID: \"11969d0d-33d2-41bd-9829-1b0a73458e96\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426835-54jqz"
Dec 13 07:15:00 crc kubenswrapper[5048]: I1213 07:15:00.358722 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/11969d0d-33d2-41bd-9829-1b0a73458e96-secret-volume\") pod \"collect-profiles-29426835-54jqz\" (UID: \"11969d0d-33d2-41bd-9829-1b0a73458e96\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426835-54jqz"
Dec 13 07:15:00 crc kubenswrapper[5048]: I1213 07:15:00.370560 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9hg9n\" (UniqueName: \"kubernetes.io/projected/11969d0d-33d2-41bd-9829-1b0a73458e96-kube-api-access-9hg9n\") pod \"collect-profiles-29426835-54jqz\" (UID: \"11969d0d-33d2-41bd-9829-1b0a73458e96\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426835-54jqz"
Dec 13 07:15:00 crc kubenswrapper[5048]: I1213 07:15:00.500813 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29426835-54jqz"
Dec 13 07:15:00 crc kubenswrapper[5048]: W1213 07:15:00.970735 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod11969d0d_33d2_41bd_9829_1b0a73458e96.slice/crio-655cd3f84ac1b4bcd64a6bd7192ea12b585e340bb29200c6daddf57d685ccd09 WatchSource:0}: Error finding container 655cd3f84ac1b4bcd64a6bd7192ea12b585e340bb29200c6daddf57d685ccd09: Status 404 returned error can't find the container with id 655cd3f84ac1b4bcd64a6bd7192ea12b585e340bb29200c6daddf57d685ccd09
Dec 13 07:15:00 crc kubenswrapper[5048]: I1213 07:15:00.975233 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29426835-54jqz"]
Dec 13 07:15:01 crc kubenswrapper[5048]: I1213 07:15:01.924199 5048 generic.go:334] "Generic (PLEG): container finished" podID="11969d0d-33d2-41bd-9829-1b0a73458e96" containerID="8f9875b125f35ff0c47cb6b04eb29e9df7e217256e7d7159cf06e129246a20ab" exitCode=0
Dec 13 07:15:01 crc kubenswrapper[5048]: I1213 07:15:01.924322 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29426835-54jqz" event={"ID":"11969d0d-33d2-41bd-9829-1b0a73458e96","Type":"ContainerDied","Data":"8f9875b125f35ff0c47cb6b04eb29e9df7e217256e7d7159cf06e129246a20ab"}
Dec 13 07:15:01 crc kubenswrapper[5048]: I1213 07:15:01.924765 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29426835-54jqz" event={"ID":"11969d0d-33d2-41bd-9829-1b0a73458e96","Type":"ContainerStarted","Data":"655cd3f84ac1b4bcd64a6bd7192ea12b585e340bb29200c6daddf57d685ccd09"}
Dec 13 07:15:03 crc kubenswrapper[5048]: I1213 07:15:03.294495 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29426835-54jqz"
Dec 13 07:15:03 crc kubenswrapper[5048]: I1213 07:15:03.408624 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/11969d0d-33d2-41bd-9829-1b0a73458e96-config-volume\") pod \"11969d0d-33d2-41bd-9829-1b0a73458e96\" (UID: \"11969d0d-33d2-41bd-9829-1b0a73458e96\") "
Dec 13 07:15:03 crc kubenswrapper[5048]: I1213 07:15:03.408678 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9hg9n\" (UniqueName: \"kubernetes.io/projected/11969d0d-33d2-41bd-9829-1b0a73458e96-kube-api-access-9hg9n\") pod \"11969d0d-33d2-41bd-9829-1b0a73458e96\" (UID: \"11969d0d-33d2-41bd-9829-1b0a73458e96\") "
Dec 13 07:15:03 crc kubenswrapper[5048]: I1213 07:15:03.408837 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/11969d0d-33d2-41bd-9829-1b0a73458e96-secret-volume\") pod \"11969d0d-33d2-41bd-9829-1b0a73458e96\" (UID: \"11969d0d-33d2-41bd-9829-1b0a73458e96\") "
Dec 13 07:15:03 crc kubenswrapper[5048]: I1213 07:15:03.409526 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/11969d0d-33d2-41bd-9829-1b0a73458e96-config-volume" (OuterVolumeSpecName: "config-volume") pod "11969d0d-33d2-41bd-9829-1b0a73458e96" (UID: "11969d0d-33d2-41bd-9829-1b0a73458e96"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 13 07:15:03 crc kubenswrapper[5048]: I1213 07:15:03.414583 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/11969d0d-33d2-41bd-9829-1b0a73458e96-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "11969d0d-33d2-41bd-9829-1b0a73458e96" (UID: "11969d0d-33d2-41bd-9829-1b0a73458e96"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 07:15:03 crc kubenswrapper[5048]: I1213 07:15:03.418707 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/11969d0d-33d2-41bd-9829-1b0a73458e96-kube-api-access-9hg9n" (OuterVolumeSpecName: "kube-api-access-9hg9n") pod "11969d0d-33d2-41bd-9829-1b0a73458e96" (UID: "11969d0d-33d2-41bd-9829-1b0a73458e96"). InnerVolumeSpecName "kube-api-access-9hg9n". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 07:15:03 crc kubenswrapper[5048]: I1213 07:15:03.511013 5048 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/11969d0d-33d2-41bd-9829-1b0a73458e96-config-volume\") on node \"crc\" DevicePath \"\""
Dec 13 07:15:03 crc kubenswrapper[5048]: I1213 07:15:03.511221 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9hg9n\" (UniqueName: \"kubernetes.io/projected/11969d0d-33d2-41bd-9829-1b0a73458e96-kube-api-access-9hg9n\") on node \"crc\" DevicePath \"\""
Dec 13 07:15:03 crc kubenswrapper[5048]: I1213 07:15:03.511280 5048 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/11969d0d-33d2-41bd-9829-1b0a73458e96-secret-volume\") on node \"crc\" DevicePath \"\""
Dec 13 07:15:03 crc kubenswrapper[5048]: I1213 07:15:03.944325 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29426835-54jqz" event={"ID":"11969d0d-33d2-41bd-9829-1b0a73458e96","Type":"ContainerDied","Data":"655cd3f84ac1b4bcd64a6bd7192ea12b585e340bb29200c6daddf57d685ccd09"}
Dec 13 07:15:03 crc kubenswrapper[5048]: I1213 07:15:03.944373 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="655cd3f84ac1b4bcd64a6bd7192ea12b585e340bb29200c6daddf57d685ccd09"
Dec 13 07:15:03 crc kubenswrapper[5048]: I1213 07:15:03.944400 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29426835-54jqz"
Dec 13 07:15:04 crc kubenswrapper[5048]: I1213 07:15:04.376007 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29426790-559jq"]
Dec 13 07:15:04 crc kubenswrapper[5048]: I1213 07:15:04.384281 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29426790-559jq"]
Dec 13 07:15:04 crc kubenswrapper[5048]: I1213 07:15:04.578326 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a458fdb3-0662-44a0-8df6-b81dcb66a669" path="/var/lib/kubelet/pods/a458fdb3-0662-44a0-8df6-b81dcb66a669/volumes"
Dec 13 07:15:46 crc kubenswrapper[5048]: I1213 07:15:46.215776 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 13 07:15:46 crc kubenswrapper[5048]: I1213 07:15:46.216375 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 13 07:15:54 crc kubenswrapper[5048]: I1213 07:15:54.159404 5048 scope.go:117] "RemoveContainer" containerID="9bdf2d1fabf5ccd8b64476b4f827e578a64b1d323d3bd599d2c6ac22bcdef70f"
Dec 13 07:16:16 crc kubenswrapper[5048]: I1213 07:16:16.215911 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 13 07:16:16 crc kubenswrapper[5048]: I1213 07:16:16.216537 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 13 07:16:42 crc kubenswrapper[5048]: I1213 07:16:42.826665 5048 generic.go:334] "Generic (PLEG): container finished" podID="13b74976-c0e2-4461-a564-de6ce88aa549" containerID="64441a4fcc3abfe340917d485f4b60ba3372dcc78c24d845b8521e26dac1ca88" exitCode=0
Dec 13 07:16:42 crc kubenswrapper[5048]: I1213 07:16:42.826768 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf" event={"ID":"13b74976-c0e2-4461-a564-de6ce88aa549","Type":"ContainerDied","Data":"64441a4fcc3abfe340917d485f4b60ba3372dcc78c24d845b8521e26dac1ca88"}
Dec 13 07:16:44 crc kubenswrapper[5048]: I1213 07:16:44.278789 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf"
Dec 13 07:16:44 crc kubenswrapper[5048]: I1213 07:16:44.436051 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/13b74976-c0e2-4461-a564-de6ce88aa549-inventory\") pod \"13b74976-c0e2-4461-a564-de6ce88aa549\" (UID: \"13b74976-c0e2-4461-a564-de6ce88aa549\") "
Dec 13 07:16:44 crc kubenswrapper[5048]: I1213 07:16:44.436097 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/13b74976-c0e2-4461-a564-de6ce88aa549-ceilometer-compute-config-data-0\") pod \"13b74976-c0e2-4461-a564-de6ce88aa549\" (UID: \"13b74976-c0e2-4461-a564-de6ce88aa549\") "
Dec 13 07:16:44 crc kubenswrapper[5048]: I1213 07:16:44.436192 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/13b74976-c0e2-4461-a564-de6ce88aa549-ssh-key\") pod \"13b74976-c0e2-4461-a564-de6ce88aa549\" (UID: \"13b74976-c0e2-4461-a564-de6ce88aa549\") "
Dec 13 07:16:44 crc kubenswrapper[5048]: I1213 07:16:44.436268 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13b74976-c0e2-4461-a564-de6ce88aa549-telemetry-combined-ca-bundle\") pod \"13b74976-c0e2-4461-a564-de6ce88aa549\" (UID: \"13b74976-c0e2-4461-a564-de6ce88aa549\") "
Dec 13 07:16:44 crc kubenswrapper[5048]: I1213 07:16:44.436321 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/13b74976-c0e2-4461-a564-de6ce88aa549-ceilometer-compute-config-data-2\") pod \"13b74976-c0e2-4461-a564-de6ce88aa549\" (UID: \"13b74976-c0e2-4461-a564-de6ce88aa549\") "
Dec 13 07:16:44 crc kubenswrapper[5048]: I1213 07:16:44.436352 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qbd42\" (UniqueName: \"kubernetes.io/projected/13b74976-c0e2-4461-a564-de6ce88aa549-kube-api-access-qbd42\") pod \"13b74976-c0e2-4461-a564-de6ce88aa549\" (UID: \"13b74976-c0e2-4461-a564-de6ce88aa549\") "
Dec 13 07:16:44 crc kubenswrapper[5048]: I1213 07:16:44.436400 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/13b74976-c0e2-4461-a564-de6ce88aa549-ceilometer-compute-config-data-1\") pod \"13b74976-c0e2-4461-a564-de6ce88aa549\" (UID: \"13b74976-c0e2-4461-a564-de6ce88aa549\") "
Dec 13 07:16:44 crc kubenswrapper[5048]: I1213 07:16:44.444232 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13b74976-c0e2-4461-a564-de6ce88aa549-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "13b74976-c0e2-4461-a564-de6ce88aa549" (UID: "13b74976-c0e2-4461-a564-de6ce88aa549"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 07:16:44 crc kubenswrapper[5048]: I1213 07:16:44.445326 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13b74976-c0e2-4461-a564-de6ce88aa549-kube-api-access-qbd42" (OuterVolumeSpecName: "kube-api-access-qbd42") pod "13b74976-c0e2-4461-a564-de6ce88aa549" (UID: "13b74976-c0e2-4461-a564-de6ce88aa549"). InnerVolumeSpecName "kube-api-access-qbd42". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 07:16:44 crc kubenswrapper[5048]: I1213 07:16:44.468159 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13b74976-c0e2-4461-a564-de6ce88aa549-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "13b74976-c0e2-4461-a564-de6ce88aa549" (UID: "13b74976-c0e2-4461-a564-de6ce88aa549"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 07:16:44 crc kubenswrapper[5048]: I1213 07:16:44.468824 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13b74976-c0e2-4461-a564-de6ce88aa549-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "13b74976-c0e2-4461-a564-de6ce88aa549" (UID: "13b74976-c0e2-4461-a564-de6ce88aa549"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 07:16:44 crc kubenswrapper[5048]: I1213 07:16:44.481231 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13b74976-c0e2-4461-a564-de6ce88aa549-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "13b74976-c0e2-4461-a564-de6ce88aa549" (UID: "13b74976-c0e2-4461-a564-de6ce88aa549"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 07:16:44 crc kubenswrapper[5048]: I1213 07:16:44.490211 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13b74976-c0e2-4461-a564-de6ce88aa549-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "13b74976-c0e2-4461-a564-de6ce88aa549" (UID: "13b74976-c0e2-4461-a564-de6ce88aa549"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 07:16:44 crc kubenswrapper[5048]: I1213 07:16:44.494315 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13b74976-c0e2-4461-a564-de6ce88aa549-inventory" (OuterVolumeSpecName: "inventory") pod "13b74976-c0e2-4461-a564-de6ce88aa549" (UID: "13b74976-c0e2-4461-a564-de6ce88aa549"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 07:16:44 crc kubenswrapper[5048]: I1213 07:16:44.538427 5048 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/13b74976-c0e2-4461-a564-de6ce88aa549-ssh-key\") on node \"crc\" DevicePath \"\""
Dec 13 07:16:44 crc kubenswrapper[5048]: I1213 07:16:44.538504 5048 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13b74976-c0e2-4461-a564-de6ce88aa549-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 13 07:16:44 crc kubenswrapper[5048]: I1213 07:16:44.538540 5048 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/13b74976-c0e2-4461-a564-de6ce88aa549-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\""
Dec 13 07:16:44 crc kubenswrapper[5048]: I1213 07:16:44.538560 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qbd42\" (UniqueName: \"kubernetes.io/projected/13b74976-c0e2-4461-a564-de6ce88aa549-kube-api-access-qbd42\") on node \"crc\" DevicePath \"\""
Dec 13 07:16:44 crc kubenswrapper[5048]: I1213 07:16:44.538580 5048 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/13b74976-c0e2-4461-a564-de6ce88aa549-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\""
Dec 13 07:16:44 crc kubenswrapper[5048]: I1213 07:16:44.538598 5048 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/13b74976-c0e2-4461-a564-de6ce88aa549-inventory\") on node \"crc\" DevicePath \"\""
Dec 13 07:16:44 crc kubenswrapper[5048]: I1213 07:16:44.538615 5048 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/13b74976-c0e2-4461-a564-de6ce88aa549-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\""
Dec 13 07:16:44 crc kubenswrapper[5048]: I1213 07:16:44.845768 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf" event={"ID":"13b74976-c0e2-4461-a564-de6ce88aa549","Type":"ContainerDied","Data":"23d57d061838dbf14fe02d43beef807cdc92971c18b2fc00547fb3ebe7a01aae"}
Dec 13 07:16:44 crc kubenswrapper[5048]: I1213 07:16:44.845815 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="23d57d061838dbf14fe02d43beef807cdc92971c18b2fc00547fb3ebe7a01aae"
Dec 13 07:16:44 crc kubenswrapper[5048]: I1213 07:16:44.845825 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf"
Dec 13 07:16:46 crc kubenswrapper[5048]: I1213 07:16:46.216039 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 13 07:16:46 crc kubenswrapper[5048]: I1213 07:16:46.216481 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 13 07:16:46 crc kubenswrapper[5048]: I1213 07:16:46.216535 5048 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-j7hns"
Dec 13 07:16:46 crc kubenswrapper[5048]: I1213 07:16:46.217302 5048 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8a03ae6bfe080367b968888a6b5a1193924bceab28a806b9d3c7ee5f1f18acb6"} pod="openshift-machine-config-operator/machine-config-daemon-j7hns" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Dec 13 07:16:46 crc kubenswrapper[5048]: I1213 07:16:46.217396 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" containerID="cri-o://8a03ae6bfe080367b968888a6b5a1193924bceab28a806b9d3c7ee5f1f18acb6" gracePeriod=600
Dec 13 07:16:46 crc kubenswrapper[5048]: I1213 07:16:46.870876 5048 generic.go:334] "Generic (PLEG): container finished" podID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerID="8a03ae6bfe080367b968888a6b5a1193924bceab28a806b9d3c7ee5f1f18acb6" exitCode=0
Dec 13 07:16:46 crc kubenswrapper[5048]: I1213 07:16:46.871028 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" event={"ID":"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b","Type":"ContainerDied","Data":"8a03ae6bfe080367b968888a6b5a1193924bceab28a806b9d3c7ee5f1f18acb6"}
Dec 13 07:16:46 crc kubenswrapper[5048]: I1213 07:16:46.871313 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" event={"ID":"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b","Type":"ContainerStarted","Data":"fbbb55828e04cb7da8203e5851a4c6374f3307372c72b9f414bc411afda46023"}
Dec 13 07:16:46 crc kubenswrapper[5048]: I1213 07:16:46.871340 5048 scope.go:117] "RemoveContainer" containerID="13a24424a3c51e9ddc7d4af672496f5caa198f8ddf1e75b5f564a337c4d739d6"
Dec 13 07:17:40 crc kubenswrapper[5048]: I1213 07:17:40.842900 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-22bfk"]
Dec 13 07:17:40 crc kubenswrapper[5048]: E1213 07:17:40.843891 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11969d0d-33d2-41bd-9829-1b0a73458e96" containerName="collect-profiles"
Dec 13 07:17:40 crc kubenswrapper[5048]: I1213 07:17:40.843906 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="11969d0d-33d2-41bd-9829-1b0a73458e96" containerName="collect-profiles"
Dec 13 07:17:40 crc kubenswrapper[5048]: E1213 07:17:40.843943 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13b74976-c0e2-4461-a564-de6ce88aa549" containerName="telemetry-edpm-deployment-openstack-edpm-ipam"
Dec 13 07:17:40 crc kubenswrapper[5048]: I1213 07:17:40.843952 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="13b74976-c0e2-4461-a564-de6ce88aa549" containerName="telemetry-edpm-deployment-openstack-edpm-ipam"
Dec 13 07:17:40 crc kubenswrapper[5048]: I1213 07:17:40.844168 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="13b74976-c0e2-4461-a564-de6ce88aa549" containerName="telemetry-edpm-deployment-openstack-edpm-ipam"
Dec 13 07:17:40 crc kubenswrapper[5048]: I1213 07:17:40.844186 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="11969d0d-33d2-41bd-9829-1b0a73458e96" containerName="collect-profiles"
Dec 13 07:17:40 crc kubenswrapper[5048]: I1213 07:17:40.845612 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-22bfk"
Dec 13 07:17:40 crc kubenswrapper[5048]: I1213 07:17:40.861202 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-22bfk"]
Dec 13 07:17:40 crc kubenswrapper[5048]: I1213 07:17:40.990534 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v48ww\" (UniqueName: \"kubernetes.io/projected/9b8ba81c-7f23-4cf7-9e2d-3cde833815bb-kube-api-access-v48ww\") pod \"redhat-marketplace-22bfk\" (UID: \"9b8ba81c-7f23-4cf7-9e2d-3cde833815bb\") " pod="openshift-marketplace/redhat-marketplace-22bfk"
Dec 13 07:17:40 crc kubenswrapper[5048]: I1213 07:17:40.990827 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b8ba81c-7f23-4cf7-9e2d-3cde833815bb-utilities\") pod \"redhat-marketplace-22bfk\" (UID: \"9b8ba81c-7f23-4cf7-9e2d-3cde833815bb\") " pod="openshift-marketplace/redhat-marketplace-22bfk"
Dec 13 07:17:40 crc kubenswrapper[5048]: I1213 07:17:40.990940 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b8ba81c-7f23-4cf7-9e2d-3cde833815bb-catalog-content\") pod \"redhat-marketplace-22bfk\" (UID: \"9b8ba81c-7f23-4cf7-9e2d-3cde833815bb\") " pod="openshift-marketplace/redhat-marketplace-22bfk"
Dec 13 07:17:41 crc kubenswrapper[5048]: I1213 07:17:41.093181 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v48ww\" (UniqueName: \"kubernetes.io/projected/9b8ba81c-7f23-4cf7-9e2d-3cde833815bb-kube-api-access-v48ww\") pod \"redhat-marketplace-22bfk\" (UID: \"9b8ba81c-7f23-4cf7-9e2d-3cde833815bb\") " pod="openshift-marketplace/redhat-marketplace-22bfk"
Dec 13 07:17:41 crc kubenswrapper[5048]: I1213 07:17:41.093266 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b8ba81c-7f23-4cf7-9e2d-3cde833815bb-utilities\") pod \"redhat-marketplace-22bfk\" (UID: \"9b8ba81c-7f23-4cf7-9e2d-3cde833815bb\") " pod="openshift-marketplace/redhat-marketplace-22bfk"
Dec 13 07:17:41 crc kubenswrapper[5048]: I1213 07:17:41.093300 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b8ba81c-7f23-4cf7-9e2d-3cde833815bb-catalog-content\") pod \"redhat-marketplace-22bfk\" (UID: \"9b8ba81c-7f23-4cf7-9e2d-3cde833815bb\") " pod="openshift-marketplace/redhat-marketplace-22bfk"
Dec 13 07:17:41 crc kubenswrapper[5048]: I1213 07:17:41.094061 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b8ba81c-7f23-4cf7-9e2d-3cde833815bb-utilities\") pod \"redhat-marketplace-22bfk\" (UID: \"9b8ba81c-7f23-4cf7-9e2d-3cde833815bb\") " pod="openshift-marketplace/redhat-marketplace-22bfk"
Dec 13 07:17:41 crc kubenswrapper[5048]: I1213 07:17:41.094072 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b8ba81c-7f23-4cf7-9e2d-3cde833815bb-catalog-content\") pod \"redhat-marketplace-22bfk\" (UID: \"9b8ba81c-7f23-4cf7-9e2d-3cde833815bb\") " pod="openshift-marketplace/redhat-marketplace-22bfk"
Dec 13 07:17:41 crc kubenswrapper[5048]: I1213 07:17:41.116387 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v48ww\" (UniqueName: \"kubernetes.io/projected/9b8ba81c-7f23-4cf7-9e2d-3cde833815bb-kube-api-access-v48ww\") pod \"redhat-marketplace-22bfk\" (UID: \"9b8ba81c-7f23-4cf7-9e2d-3cde833815bb\") " pod="openshift-marketplace/redhat-marketplace-22bfk"
Dec 13 07:17:41 crc kubenswrapper[5048]: I1213 07:17:41.175604 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-22bfk"
Dec 13 07:17:41 crc kubenswrapper[5048]: I1213 07:17:41.663488 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-22bfk"]
Dec 13 07:17:42 crc kubenswrapper[5048]: I1213 07:17:42.396191 5048 generic.go:334] "Generic (PLEG): container finished" podID="9b8ba81c-7f23-4cf7-9e2d-3cde833815bb" containerID="939a2c88dab0ecf84c1183f181d2282a4bdb48f130c82ce3311eed364975b1cf" exitCode=0
Dec 13 07:17:42 crc kubenswrapper[5048]: I1213 07:17:42.396315 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-22bfk" event={"ID":"9b8ba81c-7f23-4cf7-9e2d-3cde833815bb","Type":"ContainerDied","Data":"939a2c88dab0ecf84c1183f181d2282a4bdb48f130c82ce3311eed364975b1cf"}
Dec 13 07:17:42 crc kubenswrapper[5048]: I1213 07:17:42.396888 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-22bfk" event={"ID":"9b8ba81c-7f23-4cf7-9e2d-3cde833815bb","Type":"ContainerStarted","Data":"e1fe8a92cc0d7d1e232f90afc5bc91a31aa857d0cdef12a6b485a300e695c270"}
Dec 13 07:17:42 crc kubenswrapper[5048]: I1213 07:17:42.398782 5048 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Dec 13 07:17:43 crc kubenswrapper[5048]: I1213 07:17:43.414659 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-22bfk" event={"ID":"9b8ba81c-7f23-4cf7-9e2d-3cde833815bb","Type":"ContainerStarted","Data":"402b40d9ae03b2ccaa175a72c21c01a8964663a83af49b3e5eadc7abc6c995ba"}
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.428695 5048 generic.go:334] "Generic (PLEG): container finished" podID="9b8ba81c-7f23-4cf7-9e2d-3cde833815bb" containerID="402b40d9ae03b2ccaa175a72c21c01a8964663a83af49b3e5eadc7abc6c995ba" exitCode=0
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.428760 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-22bfk" event={"ID":"9b8ba81c-7f23-4cf7-9e2d-3cde833815bb","Type":"ContainerDied","Data":"402b40d9ae03b2ccaa175a72c21c01a8964663a83af49b3e5eadc7abc6c995ba"}
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.497205 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"]
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.498740 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.503090 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0"
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.503133 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key"
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.503146 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0"
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.503263 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-g44hk"
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.504758 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"]
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.662655 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\") " pod="openstack/tempest-tests-tempest"
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.662720 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-config-data\") pod \"tempest-tests-tempest\" (UID: \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\") " pod="openstack/tempest-tests-tempest"
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.662801 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\") " pod="openstack/tempest-tests-tempest"
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.662828 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xs2d8\" (UniqueName: \"kubernetes.io/projected/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-kube-api-access-xs2d8\") pod \"tempest-tests-tempest\" (UID: \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\") " pod="openstack/tempest-tests-tempest"
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.663003 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\") " pod="openstack/tempest-tests-tempest"
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.663066 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\") " pod="openstack/tempest-tests-tempest"
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.663101 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\") " pod="openstack/tempest-tests-tempest"
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.663296 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\") " pod="openstack/tempest-tests-tempest"
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.663380 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"tempest-tests-tempest\" (UID: \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\") " pod="openstack/tempest-tests-tempest"
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.765158 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\") " pod="openstack/tempest-tests-tempest"
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.765215 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-config-data\") pod \"tempest-tests-tempest\" (UID: \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\") " pod="openstack/tempest-tests-tempest"
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.765304 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\") " pod="openstack/tempest-tests-tempest"
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.765336 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xs2d8\" (UniqueName: \"kubernetes.io/projected/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-kube-api-access-xs2d8\") pod \"tempest-tests-tempest\" (UID: \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\") " pod="openstack/tempest-tests-tempest"
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.765384 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\") " pod="openstack/tempest-tests-tempest"
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.765423 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\") " pod="openstack/tempest-tests-tempest"
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.765492 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\") " pod="openstack/tempest-tests-tempest"
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.765541 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\") " pod="openstack/tempest-tests-tempest"
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.765573 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"tempest-tests-tempest\" (UID: \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\") " pod="openstack/tempest-tests-tempest"
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.765998 5048 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"tempest-tests-tempest\" (UID: \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/tempest-tests-tempest"
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.766308 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\") " pod="openstack/tempest-tests-tempest"
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.766977 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\") " pod="openstack/tempest-tests-tempest"
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.768143 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-config-data\") pod \"tempest-tests-tempest\" (UID: \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\") " pod="openstack/tempest-tests-tempest"
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.768177 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\") " pod="openstack/tempest-tests-tempest"
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.775230 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\") " pod="openstack/tempest-tests-tempest"
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.780271 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\") " pod="openstack/tempest-tests-tempest"
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.785929 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\") " pod="openstack/tempest-tests-tempest"
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.796856 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xs2d8\" (UniqueName: \"kubernetes.io/projected/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-kube-api-access-xs2d8\") pod \"tempest-tests-tempest\" (UID: \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\") " pod="openstack/tempest-tests-tempest"
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.806345 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"tempest-tests-tempest\" (UID: \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\") " pod="openstack/tempest-tests-tempest"
Dec 13 07:17:44 crc kubenswrapper[5048]: I1213 07:17:44.828803 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Dec 13 07:17:45 crc kubenswrapper[5048]: I1213 07:17:45.296874 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"]
Dec 13 07:17:45 crc kubenswrapper[5048]: W1213 07:17:45.311331 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb1419e46_ce93_452b_9d88_b9a50e9dbfe6.slice/crio-5af9a89c2dbd5f3b0ae4587b27ac05b508f29e462e070d911e8e97bbda108d8d WatchSource:0}: Error finding container 5af9a89c2dbd5f3b0ae4587b27ac05b508f29e462e070d911e8e97bbda108d8d: Status 404 returned error can't find the container with id 5af9a89c2dbd5f3b0ae4587b27ac05b508f29e462e070d911e8e97bbda108d8d
Dec 13 07:17:45 crc kubenswrapper[5048]: I1213 07:17:45.439380 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-22bfk" event={"ID":"9b8ba81c-7f23-4cf7-9e2d-3cde833815bb","Type":"ContainerStarted","Data":"15a42f38db08bd7391dbfbae00b26e8b99456d5c183443023423aec79b1e3049"}
Dec 13 07:17:45 crc kubenswrapper[5048]: I1213 07:17:45.440930 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"b1419e46-ce93-452b-9d88-b9a50e9dbfe6","Type":"ContainerStarted","Data":"5af9a89c2dbd5f3b0ae4587b27ac05b508f29e462e070d911e8e97bbda108d8d"}
Dec 13 07:17:45 crc kubenswrapper[5048]: I1213 07:17:45.461671 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-22bfk" podStartSLOduration=2.70312201 podStartE2EDuration="5.46164735s" podCreationTimestamp="2025-12-13 07:17:40 +0000 UTC" firstStartedPulling="2025-12-13 07:17:42.398419143 +0000 UTC m=+2896.265013724" lastFinishedPulling="2025-12-13 07:17:45.156944483 +0000 UTC m=+2899.023539064" observedRunningTime="2025-12-13 07:17:45.455957222 +0000 UTC m=+2899.322551863" watchObservedRunningTime="2025-12-13 07:17:45.46164735 +0000 UTC m=+2899.328241971"
Dec 13 07:17:51 crc kubenswrapper[5048]: I1213 07:17:51.175812 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-22bfk"
Dec 13 07:17:51 crc kubenswrapper[5048]: I1213 07:17:51.176490 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-22bfk"
Dec 13 07:17:51 crc kubenswrapper[5048]: I1213 07:17:51.226423 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-22bfk"
Dec 13 07:17:51 crc kubenswrapper[5048]: I1213 07:17:51.556732 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-22bfk"
Dec 13 07:17:51 crc kubenswrapper[5048]: I1213 07:17:51.614079 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-22bfk"]
Dec 13 07:17:53 crc kubenswrapper[5048]: I1213 07:17:53.530626 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-22bfk" podUID="9b8ba81c-7f23-4cf7-9e2d-3cde833815bb" containerName="registry-server" containerID="cri-o://15a42f38db08bd7391dbfbae00b26e8b99456d5c183443023423aec79b1e3049" gracePeriod=2
Dec 13 07:17:55 crc kubenswrapper[5048]: I1213 07:17:55.552559 5048 generic.go:334] "Generic (PLEG): container finished" podID="9b8ba81c-7f23-4cf7-9e2d-3cde833815bb" containerID="15a42f38db08bd7391dbfbae00b26e8b99456d5c183443023423aec79b1e3049" exitCode=0
Dec 13 07:17:55 crc kubenswrapper[5048]: I1213 07:17:55.552655 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-22bfk" event={"ID":"9b8ba81c-7f23-4cf7-9e2d-3cde833815bb","Type":"ContainerDied","Data":"15a42f38db08bd7391dbfbae00b26e8b99456d5c183443023423aec79b1e3049"}
Dec 13 07:18:01 crc kubenswrapper[5048]: E1213 07:18:01.176401 5048 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 15a42f38db08bd7391dbfbae00b26e8b99456d5c183443023423aec79b1e3049 is running failed: container process not found" containerID="15a42f38db08bd7391dbfbae00b26e8b99456d5c183443023423aec79b1e3049" cmd=["grpc_health_probe","-addr=:50051"]
Dec 13 07:18:01 crc kubenswrapper[5048]: E1213 07:18:01.177392 5048 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 15a42f38db08bd7391dbfbae00b26e8b99456d5c183443023423aec79b1e3049 is running failed: container process not found" containerID="15a42f38db08bd7391dbfbae00b26e8b99456d5c183443023423aec79b1e3049" cmd=["grpc_health_probe","-addr=:50051"]
Dec 13 07:18:01 crc kubenswrapper[5048]: E1213 07:18:01.177736 5048 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 15a42f38db08bd7391dbfbae00b26e8b99456d5c183443023423aec79b1e3049 is running failed: container process not found" containerID="15a42f38db08bd7391dbfbae00b26e8b99456d5c183443023423aec79b1e3049" cmd=["grpc_health_probe","-addr=:50051"]
Dec 13 07:18:01 crc kubenswrapper[5048]: E1213 07:18:01.177764 5048 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 15a42f38db08bd7391dbfbae00b26e8b99456d5c183443023423aec79b1e3049 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-22bfk" podUID="9b8ba81c-7f23-4cf7-9e2d-3cde833815bb" containerName="registry-server"
Dec 13 07:18:11 crc kubenswrapper[5048]: E1213 07:18:11.177266 5048 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 15a42f38db08bd7391dbfbae00b26e8b99456d5c183443023423aec79b1e3049 is running failed: container process not found" containerID="15a42f38db08bd7391dbfbae00b26e8b99456d5c183443023423aec79b1e3049" cmd=["grpc_health_probe","-addr=:50051"]
Dec 13 07:18:11 crc kubenswrapper[5048]: E1213 07:18:11.178469 5048 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 15a42f38db08bd7391dbfbae00b26e8b99456d5c183443023423aec79b1e3049 is running failed: container process not found" containerID="15a42f38db08bd7391dbfbae00b26e8b99456d5c183443023423aec79b1e3049" cmd=["grpc_health_probe","-addr=:50051"]
Dec 13 07:18:11 crc kubenswrapper[5048]: E1213 07:18:11.178867 5048 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 15a42f38db08bd7391dbfbae00b26e8b99456d5c183443023423aec79b1e3049 is running failed: container process not found" containerID="15a42f38db08bd7391dbfbae00b26e8b99456d5c183443023423aec79b1e3049" cmd=["grpc_health_probe","-addr=:50051"]
Dec 13 07:18:11 crc kubenswrapper[5048]: E1213 07:18:11.178922 5048 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 15a42f38db08bd7391dbfbae00b26e8b99456d5c183443023423aec79b1e3049 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-22bfk" podUID="9b8ba81c-7f23-4cf7-9e2d-3cde833815bb" containerName="registry-server"
Dec 13 07:18:13 crc kubenswrapper[5048]: I1213 07:18:13.462522 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-22bfk"
Dec 13 07:18:13 crc kubenswrapper[5048]: E1213 07:18:13.493764 5048 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified"
Dec 13 07:18:13 crc kubenswrapper[5048]: E1213 07:18:13.494004 5048 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:tempest-tests-tempest-tests-runner,Image:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/test_operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-workdir,ReadOnly:false,MountPath:/var/lib/tempest,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-temporary,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-logs,ReadOnly:false,MountPath:/var/lib/tempest/external_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/etc/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/var/lib/tempest/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/etc/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ca-certs,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key,ReadOnly:false,MountPath:/var/lib/tempest/id_ecdsa,SubPath:ssh_key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xs2d8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42480,RunAsNonRoot:*false,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:*true,RunAsGroup:*42480,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-custom-data-s0,},Optional:nil,},SecretRef:nil,},EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-env-vars-s0,},Optional:nil,},SecretRef:nil,},},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod tempest-tests-tempest_openstack(b1419e46-ce93-452b-9d88-b9a50e9dbfe6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Dec 13 07:18:13 crc kubenswrapper[5048]: E1213 07:18:13.495668 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/tempest-tests-tempest" podUID="b1419e46-ce93-452b-9d88-b9a50e9dbfe6"
Dec 13 07:18:13 crc kubenswrapper[5048]: I1213 07:18:13.551621 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b8ba81c-7f23-4cf7-9e2d-3cde833815bb-utilities\") pod \"9b8ba81c-7f23-4cf7-9e2d-3cde833815bb\" (UID: \"9b8ba81c-7f23-4cf7-9e2d-3cde833815bb\") "
Dec 13 07:18:13 crc kubenswrapper[5048]: I1213 07:18:13.551705 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v48ww\" (UniqueName: \"kubernetes.io/projected/9b8ba81c-7f23-4cf7-9e2d-3cde833815bb-kube-api-access-v48ww\") pod \"9b8ba81c-7f23-4cf7-9e2d-3cde833815bb\" (UID: \"9b8ba81c-7f23-4cf7-9e2d-3cde833815bb\") "
Dec 13 07:18:13 crc kubenswrapper[5048]: I1213 07:18:13.551829 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b8ba81c-7f23-4cf7-9e2d-3cde833815bb-catalog-content\") pod \"9b8ba81c-7f23-4cf7-9e2d-3cde833815bb\" (UID: \"9b8ba81c-7f23-4cf7-9e2d-3cde833815bb\") "
Dec 13 07:18:13 crc kubenswrapper[5048]: I1213 07:18:13.552256 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b8ba81c-7f23-4cf7-9e2d-3cde833815bb-utilities" (OuterVolumeSpecName: "utilities") pod "9b8ba81c-7f23-4cf7-9e2d-3cde833815bb" (UID: "9b8ba81c-7f23-4cf7-9e2d-3cde833815bb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 13 07:18:13 crc kubenswrapper[5048]: I1213 07:18:13.558204 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b8ba81c-7f23-4cf7-9e2d-3cde833815bb-kube-api-access-v48ww" (OuterVolumeSpecName: "kube-api-access-v48ww") pod "9b8ba81c-7f23-4cf7-9e2d-3cde833815bb" (UID: "9b8ba81c-7f23-4cf7-9e2d-3cde833815bb"). InnerVolumeSpecName "kube-api-access-v48ww". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 07:18:13 crc kubenswrapper[5048]: I1213 07:18:13.571514 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b8ba81c-7f23-4cf7-9e2d-3cde833815bb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9b8ba81c-7f23-4cf7-9e2d-3cde833815bb" (UID: "9b8ba81c-7f23-4cf7-9e2d-3cde833815bb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 13 07:18:13 crc kubenswrapper[5048]: I1213 07:18:13.653898 5048 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b8ba81c-7f23-4cf7-9e2d-3cde833815bb-utilities\") on node \"crc\" DevicePath \"\""
Dec 13 07:18:13 crc kubenswrapper[5048]: I1213 07:18:13.653927 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v48ww\" (UniqueName: \"kubernetes.io/projected/9b8ba81c-7f23-4cf7-9e2d-3cde833815bb-kube-api-access-v48ww\") on node \"crc\" DevicePath \"\""
Dec 13 07:18:13 crc kubenswrapper[5048]: I1213 07:18:13.653940 5048 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b8ba81c-7f23-4cf7-9e2d-3cde833815bb-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 13 07:18:13 crc kubenswrapper[5048]: I1213 07:18:13.774108 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-22bfk"
Dec 13 07:18:13 crc kubenswrapper[5048]: I1213 07:18:13.774124 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-22bfk" event={"ID":"9b8ba81c-7f23-4cf7-9e2d-3cde833815bb","Type":"ContainerDied","Data":"e1fe8a92cc0d7d1e232f90afc5bc91a31aa857d0cdef12a6b485a300e695c270"}
Dec 13 07:18:13 crc kubenswrapper[5048]: I1213 07:18:13.774197 5048 scope.go:117] "RemoveContainer" containerID="15a42f38db08bd7391dbfbae00b26e8b99456d5c183443023423aec79b1e3049"
Dec 13 07:18:13 crc kubenswrapper[5048]: E1213 07:18:13.776292 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified\\\"\"" pod="openstack/tempest-tests-tempest" podUID="b1419e46-ce93-452b-9d88-b9a50e9dbfe6"
Dec 13 07:18:13 crc kubenswrapper[5048]: I1213 07:18:13.810762 5048 scope.go:117] "RemoveContainer" containerID="402b40d9ae03b2ccaa175a72c21c01a8964663a83af49b3e5eadc7abc6c995ba"
Dec 13 07:18:13 crc kubenswrapper[5048]: I1213 07:18:13.832853 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-22bfk"]
Dec 13 07:18:13 crc kubenswrapper[5048]: I1213 07:18:13.841195 5048 scope.go:117] "RemoveContainer" containerID="939a2c88dab0ecf84c1183f181d2282a4bdb48f130c82ce3311eed364975b1cf"
Dec 13 07:18:13 crc kubenswrapper[5048]: I1213 07:18:13.843545 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-22bfk"]
Dec 13 07:18:14 crc kubenswrapper[5048]: I1213 07:18:14.578686 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b8ba81c-7f23-4cf7-9e2d-3cde833815bb" path="/var/lib/kubelet/pods/9b8ba81c-7f23-4cf7-9e2d-3cde833815bb/volumes"
Dec 13 07:18:26 crc kubenswrapper[5048]: I1213 07:18:26.883993 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"b1419e46-ce93-452b-9d88-b9a50e9dbfe6","Type":"ContainerStarted","Data":"49b4813fe256ca1a5852891e6d463c68f80fa4d2692fb2678370533804292f05"}
Dec 13 07:18:46 crc kubenswrapper[5048]: I1213 07:18:46.216338 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 13 07:18:46 crc kubenswrapper[5048]: I1213 07:18:46.216984 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 13 07:19:16 crc kubenswrapper[5048]: I1213 07:19:16.216190 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 13 07:19:16 crc kubenswrapper[5048]: I1213 07:19:16.216732 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 13 07:19:46 crc kubenswrapper[5048]: I1213 07:19:46.216512 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 13 07:19:46 crc kubenswrapper[5048]: I1213 07:19:46.217131 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 13 07:19:46 crc kubenswrapper[5048]: I1213 07:19:46.217175 5048 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-j7hns"
Dec 13 07:19:46 crc kubenswrapper[5048]: I1213 07:19:46.217925 5048 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"fbbb55828e04cb7da8203e5851a4c6374f3307372c72b9f414bc411afda46023"} pod="openshift-machine-config-operator/machine-config-daemon-j7hns" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Dec 13 07:19:46 crc kubenswrapper[5048]: I1213 07:19:46.217983 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" containerID="cri-o://fbbb55828e04cb7da8203e5851a4c6374f3307372c72b9f414bc411afda46023" gracePeriod=600
Dec 13 07:19:46 crc kubenswrapper[5048]: E1213 07:19:46.347933 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b"
Dec 13 07:19:46 crc kubenswrapper[5048]: I1213 07:19:46.772928 5048 generic.go:334] "Generic (PLEG): container finished" podID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerID="fbbb55828e04cb7da8203e5851a4c6374f3307372c72b9f414bc411afda46023" exitCode=0
Dec 13 07:19:46 crc kubenswrapper[5048]: I1213 07:19:46.772994 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" event={"ID":"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b","Type":"ContainerDied","Data":"fbbb55828e04cb7da8203e5851a4c6374f3307372c72b9f414bc411afda46023"}
Dec 13 07:19:46 crc kubenswrapper[5048]: I1213 07:19:46.773984 5048 scope.go:117] "RemoveContainer" containerID="8a03ae6bfe080367b968888a6b5a1193924bceab28a806b9d3c7ee5f1f18acb6"
Dec 13 07:19:46 crc kubenswrapper[5048]: I1213 07:19:46.775327 5048 scope.go:117] "RemoveContainer" containerID="fbbb55828e04cb7da8203e5851a4c6374f3307372c72b9f414bc411afda46023"
Dec 13 07:19:46 crc kubenswrapper[5048]: E1213 07:19:46.775910 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b"
Dec 13 07:19:46 crc kubenswrapper[5048]: I1213 07:19:46.796289 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=83.965484134 podStartE2EDuration="2m3.796259929s" podCreationTimestamp="2025-12-13 07:17:43 +0000 UTC" firstStartedPulling="2025-12-13 07:17:45.32592769 +0000 UTC m=+2899.192522291" lastFinishedPulling="2025-12-13 07:18:25.156703495 +0000 UTC m=+2939.023298086" observedRunningTime="2025-12-13 07:18:26.915630262 +0000 UTC m=+2940.782224853" watchObservedRunningTime="2025-12-13 07:19:46.796259929 +0000 UTC m=+3020.662854500"
Dec 13 07:19:48 crc kubenswrapper[5048]: I1213 07:19:48.761886 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-tcnpx"]
Dec 13 07:19:48 crc kubenswrapper[5048]: E1213 07:19:48.762761 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b8ba81c-7f23-4cf7-9e2d-3cde833815bb" containerName="registry-server"
Dec 13 07:19:48 crc kubenswrapper[5048]: I1213 07:19:48.762780 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b8ba81c-7f23-4cf7-9e2d-3cde833815bb" containerName="registry-server"
Dec 13 07:19:48 crc kubenswrapper[5048]: E1213 07:19:48.762798 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b8ba81c-7f23-4cf7-9e2d-3cde833815bb" containerName="extract-utilities"
Dec 13 07:19:48 crc kubenswrapper[5048]: I1213 07:19:48.762806 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b8ba81c-7f23-4cf7-9e2d-3cde833815bb" containerName="extract-utilities"
Dec 13 07:19:48 crc kubenswrapper[5048]: E1213 07:19:48.762820 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b8ba81c-7f23-4cf7-9e2d-3cde833815bb" containerName="extract-content"
Dec 13 07:19:48 crc kubenswrapper[5048]: I1213 07:19:48.762829 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b8ba81c-7f23-4cf7-9e2d-3cde833815bb" containerName="extract-content"
Dec 13 07:19:48 crc kubenswrapper[5048]: I1213 07:19:48.763037 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b8ba81c-7f23-4cf7-9e2d-3cde833815bb" containerName="registry-server"
Dec 13 07:19:48 crc kubenswrapper[5048]: I1213 07:19:48.764691 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tcnpx"
Dec 13 07:19:48 crc kubenswrapper[5048]: I1213 07:19:48.776796 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tcnpx"]
Dec 13 07:19:48 crc kubenswrapper[5048]: I1213 07:19:48.946544 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0675d3da-ae8e-4a43-9b10-9d3314d68465-catalog-content\") pod \"redhat-operators-tcnpx\" (UID: \"0675d3da-ae8e-4a43-9b10-9d3314d68465\") " pod="openshift-marketplace/redhat-operators-tcnpx"
Dec 13 07:19:48 crc kubenswrapper[5048]: I1213 07:19:48.946587 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0675d3da-ae8e-4a43-9b10-9d3314d68465-utilities\") pod \"redhat-operators-tcnpx\" (UID: \"0675d3da-ae8e-4a43-9b10-9d3314d68465\") " pod="openshift-marketplace/redhat-operators-tcnpx"
Dec 13 07:19:48 crc kubenswrapper[5048]: I1213 07:19:48.946746 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vjmsf\" (UniqueName: \"kubernetes.io/projected/0675d3da-ae8e-4a43-9b10-9d3314d68465-kube-api-access-vjmsf\") pod \"redhat-operators-tcnpx\" (UID: \"0675d3da-ae8e-4a43-9b10-9d3314d68465\") " pod="openshift-marketplace/redhat-operators-tcnpx"
Dec 13 07:19:49 crc kubenswrapper[5048]: I1213 07:19:49.048422 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vjmsf\" (UniqueName: \"kubernetes.io/projected/0675d3da-ae8e-4a43-9b10-9d3314d68465-kube-api-access-vjmsf\") pod \"redhat-operators-tcnpx\" (UID: \"0675d3da-ae8e-4a43-9b10-9d3314d68465\") " pod="openshift-marketplace/redhat-operators-tcnpx"
Dec 13 07:19:49 crc kubenswrapper[5048]: I1213 07:19:49.048605 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0675d3da-ae8e-4a43-9b10-9d3314d68465-catalog-content\") pod \"redhat-operators-tcnpx\" (UID: \"0675d3da-ae8e-4a43-9b10-9d3314d68465\") " pod="openshift-marketplace/redhat-operators-tcnpx"
Dec 13 07:19:49 crc kubenswrapper[5048]: I1213 07:19:49.048640 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0675d3da-ae8e-4a43-9b10-9d3314d68465-utilities\") pod \"redhat-operators-tcnpx\" (UID: \"0675d3da-ae8e-4a43-9b10-9d3314d68465\") " pod="openshift-marketplace/redhat-operators-tcnpx"
Dec 13 07:19:49 crc kubenswrapper[5048]: I1213 07:19:49.049324 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0675d3da-ae8e-4a43-9b10-9d3314d68465-utilities\") pod \"redhat-operators-tcnpx\" (UID: \"0675d3da-ae8e-4a43-9b10-9d3314d68465\") " pod="openshift-marketplace/redhat-operators-tcnpx"
Dec 13 07:19:49 crc kubenswrapper[5048]: I1213 07:19:49.049957 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0675d3da-ae8e-4a43-9b10-9d3314d68465-catalog-content\") pod \"redhat-operators-tcnpx\" (UID: \"0675d3da-ae8e-4a43-9b10-9d3314d68465\") " pod="openshift-marketplace/redhat-operators-tcnpx"
Dec 13 07:19:49 crc kubenswrapper[5048]: I1213
07:19:49.079004 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vjmsf\" (UniqueName: \"kubernetes.io/projected/0675d3da-ae8e-4a43-9b10-9d3314d68465-kube-api-access-vjmsf\") pod \"redhat-operators-tcnpx\" (UID: \"0675d3da-ae8e-4a43-9b10-9d3314d68465\") " pod="openshift-marketplace/redhat-operators-tcnpx" Dec 13 07:19:49 crc kubenswrapper[5048]: I1213 07:19:49.097938 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tcnpx" Dec 13 07:19:49 crc kubenswrapper[5048]: I1213 07:19:49.607407 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tcnpx"] Dec 13 07:19:49 crc kubenswrapper[5048]: I1213 07:19:49.804208 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tcnpx" event={"ID":"0675d3da-ae8e-4a43-9b10-9d3314d68465","Type":"ContainerStarted","Data":"b4951ebf8f2074c07e077ea9f16511d9833beef3ad0c90d218885d2e13705ff1"} Dec 13 07:19:49 crc kubenswrapper[5048]: I1213 07:19:49.804490 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tcnpx" event={"ID":"0675d3da-ae8e-4a43-9b10-9d3314d68465","Type":"ContainerStarted","Data":"bbc7375d4e4bdec370ac4c1e38f96bfdf3b6df1e65a8d92e536aa261edd3893c"} Dec 13 07:19:50 crc kubenswrapper[5048]: I1213 07:19:50.829785 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tcnpx" event={"ID":"0675d3da-ae8e-4a43-9b10-9d3314d68465","Type":"ContainerDied","Data":"b4951ebf8f2074c07e077ea9f16511d9833beef3ad0c90d218885d2e13705ff1"} Dec 13 07:19:50 crc kubenswrapper[5048]: I1213 07:19:50.829940 5048 generic.go:334] "Generic (PLEG): container finished" podID="0675d3da-ae8e-4a43-9b10-9d3314d68465" containerID="b4951ebf8f2074c07e077ea9f16511d9833beef3ad0c90d218885d2e13705ff1" exitCode=0 Dec 13 07:19:51 crc kubenswrapper[5048]: I1213 07:19:51.842175 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tcnpx" event={"ID":"0675d3da-ae8e-4a43-9b10-9d3314d68465","Type":"ContainerStarted","Data":"30354aeed506defed727b03f08ce7597e3d1d67a4e1e19b5cfc40a69614430f6"} Dec 13 07:19:54 crc kubenswrapper[5048]: I1213 07:19:54.875962 5048 generic.go:334] "Generic (PLEG): container finished" podID="0675d3da-ae8e-4a43-9b10-9d3314d68465" containerID="30354aeed506defed727b03f08ce7597e3d1d67a4e1e19b5cfc40a69614430f6" exitCode=0 Dec 13 07:19:54 crc kubenswrapper[5048]: I1213 07:19:54.876104 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tcnpx" event={"ID":"0675d3da-ae8e-4a43-9b10-9d3314d68465","Type":"ContainerDied","Data":"30354aeed506defed727b03f08ce7597e3d1d67a4e1e19b5cfc40a69614430f6"} Dec 13 07:19:56 crc kubenswrapper[5048]: I1213 07:19:56.900606 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tcnpx" event={"ID":"0675d3da-ae8e-4a43-9b10-9d3314d68465","Type":"ContainerStarted","Data":"4009cc2941098a043a7509f96a4aad1b5caf3f0899a696f056b7d8295b26bd93"} Dec 13 07:19:56 crc kubenswrapper[5048]: I1213 07:19:56.924504 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-tcnpx" podStartSLOduration=3.938834409 podStartE2EDuration="8.924480888s" podCreationTimestamp="2025-12-13 07:19:48 +0000 UTC" firstStartedPulling="2025-12-13 07:19:50.832833424 +0000 UTC m=+3024.699428015" 
lastFinishedPulling="2025-12-13 07:19:55.818479903 +0000 UTC m=+3029.685074494" observedRunningTime="2025-12-13 07:19:56.918169927 +0000 UTC m=+3030.784764518" watchObservedRunningTime="2025-12-13 07:19:56.924480888 +0000 UTC m=+3030.791075469" Dec 13 07:19:59 crc kubenswrapper[5048]: I1213 07:19:59.098994 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-tcnpx" Dec 13 07:19:59 crc kubenswrapper[5048]: I1213 07:19:59.099354 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-tcnpx" Dec 13 07:19:59 crc kubenswrapper[5048]: I1213 07:19:59.567353 5048 scope.go:117] "RemoveContainer" containerID="fbbb55828e04cb7da8203e5851a4c6374f3307372c72b9f414bc411afda46023" Dec 13 07:19:59 crc kubenswrapper[5048]: E1213 07:19:59.567676 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:20:00 crc kubenswrapper[5048]: I1213 07:20:00.156261 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tcnpx" podUID="0675d3da-ae8e-4a43-9b10-9d3314d68465" containerName="registry-server" probeResult="failure" output=< Dec 13 07:20:00 crc kubenswrapper[5048]: timeout: failed to connect service ":50051" within 1s Dec 13 07:20:00 crc kubenswrapper[5048]: > Dec 13 07:20:09 crc kubenswrapper[5048]: I1213 07:20:09.143977 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-tcnpx" Dec 13 07:20:09 crc kubenswrapper[5048]: I1213 07:20:09.264304 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-tcnpx" Dec 13 07:20:09 crc kubenswrapper[5048]: I1213 07:20:09.396228 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tcnpx"] Dec 13 07:20:11 crc kubenswrapper[5048]: I1213 07:20:11.051969 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-tcnpx" podUID="0675d3da-ae8e-4a43-9b10-9d3314d68465" containerName="registry-server" containerID="cri-o://4009cc2941098a043a7509f96a4aad1b5caf3f0899a696f056b7d8295b26bd93" gracePeriod=2 Dec 13 07:20:11 crc kubenswrapper[5048]: I1213 07:20:11.656579 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-tcnpx" Dec 13 07:20:11 crc kubenswrapper[5048]: I1213 07:20:11.718332 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0675d3da-ae8e-4a43-9b10-9d3314d68465-utilities\") pod \"0675d3da-ae8e-4a43-9b10-9d3314d68465\" (UID: \"0675d3da-ae8e-4a43-9b10-9d3314d68465\") " Dec 13 07:20:11 crc kubenswrapper[5048]: I1213 07:20:11.718631 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0675d3da-ae8e-4a43-9b10-9d3314d68465-catalog-content\") pod \"0675d3da-ae8e-4a43-9b10-9d3314d68465\" (UID: \"0675d3da-ae8e-4a43-9b10-9d3314d68465\") " Dec 13 07:20:11 crc kubenswrapper[5048]: I1213 07:20:11.718693 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vjmsf\" (UniqueName: \"kubernetes.io/projected/0675d3da-ae8e-4a43-9b10-9d3314d68465-kube-api-access-vjmsf\") pod \"0675d3da-ae8e-4a43-9b10-9d3314d68465\" (UID: \"0675d3da-ae8e-4a43-9b10-9d3314d68465\") " Dec 13 07:20:11 crc kubenswrapper[5048]: I1213 07:20:11.719209 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0675d3da-ae8e-4a43-9b10-9d3314d68465-utilities" (OuterVolumeSpecName: "utilities") pod "0675d3da-ae8e-4a43-9b10-9d3314d68465" (UID: "0675d3da-ae8e-4a43-9b10-9d3314d68465"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 07:20:11 crc kubenswrapper[5048]: I1213 07:20:11.719470 5048 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0675d3da-ae8e-4a43-9b10-9d3314d68465-utilities\") on node \"crc\" DevicePath \"\"" Dec 13 07:20:11 crc kubenswrapper[5048]: I1213 07:20:11.726093 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0675d3da-ae8e-4a43-9b10-9d3314d68465-kube-api-access-vjmsf" (OuterVolumeSpecName: "kube-api-access-vjmsf") pod "0675d3da-ae8e-4a43-9b10-9d3314d68465" (UID: "0675d3da-ae8e-4a43-9b10-9d3314d68465"). InnerVolumeSpecName "kube-api-access-vjmsf". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 07:20:11 crc kubenswrapper[5048]: I1213 07:20:11.821157 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vjmsf\" (UniqueName: \"kubernetes.io/projected/0675d3da-ae8e-4a43-9b10-9d3314d68465-kube-api-access-vjmsf\") on node \"crc\" DevicePath \"\"" Dec 13 07:20:11 crc kubenswrapper[5048]: I1213 07:20:11.842119 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0675d3da-ae8e-4a43-9b10-9d3314d68465-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0675d3da-ae8e-4a43-9b10-9d3314d68465" (UID: "0675d3da-ae8e-4a43-9b10-9d3314d68465"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 07:20:11 crc kubenswrapper[5048]: I1213 07:20:11.923603 5048 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0675d3da-ae8e-4a43-9b10-9d3314d68465-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 13 07:20:12 crc kubenswrapper[5048]: I1213 07:20:12.066413 5048 generic.go:334] "Generic (PLEG): container finished" podID="0675d3da-ae8e-4a43-9b10-9d3314d68465" containerID="4009cc2941098a043a7509f96a4aad1b5caf3f0899a696f056b7d8295b26bd93" exitCode=0 Dec 13 07:20:12 crc kubenswrapper[5048]: I1213 07:20:12.066467 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tcnpx" event={"ID":"0675d3da-ae8e-4a43-9b10-9d3314d68465","Type":"ContainerDied","Data":"4009cc2941098a043a7509f96a4aad1b5caf3f0899a696f056b7d8295b26bd93"} Dec 13 07:20:12 crc kubenswrapper[5048]: I1213 07:20:12.066502 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tcnpx" event={"ID":"0675d3da-ae8e-4a43-9b10-9d3314d68465","Type":"ContainerDied","Data":"bbc7375d4e4bdec370ac4c1e38f96bfdf3b6df1e65a8d92e536aa261edd3893c"} Dec 13 07:20:12 crc kubenswrapper[5048]: I1213 07:20:12.066522 5048 scope.go:117] "RemoveContainer" containerID="4009cc2941098a043a7509f96a4aad1b5caf3f0899a696f056b7d8295b26bd93" Dec 13 07:20:12 crc kubenswrapper[5048]: I1213 07:20:12.066526 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tcnpx" Dec 13 07:20:12 crc kubenswrapper[5048]: I1213 07:20:12.100258 5048 scope.go:117] "RemoveContainer" containerID="30354aeed506defed727b03f08ce7597e3d1d67a4e1e19b5cfc40a69614430f6" Dec 13 07:20:12 crc kubenswrapper[5048]: I1213 07:20:12.107022 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tcnpx"] Dec 13 07:20:12 crc kubenswrapper[5048]: I1213 07:20:12.114704 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-tcnpx"] Dec 13 07:20:12 crc kubenswrapper[5048]: I1213 07:20:12.124186 5048 scope.go:117] "RemoveContainer" containerID="b4951ebf8f2074c07e077ea9f16511d9833beef3ad0c90d218885d2e13705ff1" Dec 13 07:20:12 crc kubenswrapper[5048]: I1213 07:20:12.171940 5048 scope.go:117] "RemoveContainer" containerID="4009cc2941098a043a7509f96a4aad1b5caf3f0899a696f056b7d8295b26bd93" Dec 13 07:20:12 crc kubenswrapper[5048]: E1213 07:20:12.172540 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4009cc2941098a043a7509f96a4aad1b5caf3f0899a696f056b7d8295b26bd93\": container with ID starting with 4009cc2941098a043a7509f96a4aad1b5caf3f0899a696f056b7d8295b26bd93 not found: ID does not exist" containerID="4009cc2941098a043a7509f96a4aad1b5caf3f0899a696f056b7d8295b26bd93" Dec 13 07:20:12 crc kubenswrapper[5048]: I1213 07:20:12.172581 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4009cc2941098a043a7509f96a4aad1b5caf3f0899a696f056b7d8295b26bd93"} err="failed to get container status \"4009cc2941098a043a7509f96a4aad1b5caf3f0899a696f056b7d8295b26bd93\": rpc error: code = NotFound desc = could not find container \"4009cc2941098a043a7509f96a4aad1b5caf3f0899a696f056b7d8295b26bd93\": container with ID starting with 4009cc2941098a043a7509f96a4aad1b5caf3f0899a696f056b7d8295b26bd93 not found: ID does not exist" Dec 13 07:20:12 crc 
kubenswrapper[5048]: I1213 07:20:12.172607 5048 scope.go:117] "RemoveContainer" containerID="30354aeed506defed727b03f08ce7597e3d1d67a4e1e19b5cfc40a69614430f6"
Dec 13 07:20:12 crc kubenswrapper[5048]: E1213 07:20:12.173126 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"30354aeed506defed727b03f08ce7597e3d1d67a4e1e19b5cfc40a69614430f6\": container with ID starting with 30354aeed506defed727b03f08ce7597e3d1d67a4e1e19b5cfc40a69614430f6 not found: ID does not exist" containerID="30354aeed506defed727b03f08ce7597e3d1d67a4e1e19b5cfc40a69614430f6"
Dec 13 07:20:12 crc kubenswrapper[5048]: I1213 07:20:12.173155 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"30354aeed506defed727b03f08ce7597e3d1d67a4e1e19b5cfc40a69614430f6"} err="failed to get container status \"30354aeed506defed727b03f08ce7597e3d1d67a4e1e19b5cfc40a69614430f6\": rpc error: code = NotFound desc = could not find container \"30354aeed506defed727b03f08ce7597e3d1d67a4e1e19b5cfc40a69614430f6\": container with ID starting with 30354aeed506defed727b03f08ce7597e3d1d67a4e1e19b5cfc40a69614430f6 not found: ID does not exist"
Dec 13 07:20:12 crc kubenswrapper[5048]: I1213 07:20:12.173170 5048 scope.go:117] "RemoveContainer" containerID="b4951ebf8f2074c07e077ea9f16511d9833beef3ad0c90d218885d2e13705ff1"
Dec 13 07:20:12 crc kubenswrapper[5048]: E1213 07:20:12.173525 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b4951ebf8f2074c07e077ea9f16511d9833beef3ad0c90d218885d2e13705ff1\": container with ID starting with b4951ebf8f2074c07e077ea9f16511d9833beef3ad0c90d218885d2e13705ff1 not found: ID does not exist" containerID="b4951ebf8f2074c07e077ea9f16511d9833beef3ad0c90d218885d2e13705ff1"
Dec 13 07:20:12 crc kubenswrapper[5048]: I1213 07:20:12.173553 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b4951ebf8f2074c07e077ea9f16511d9833beef3ad0c90d218885d2e13705ff1"} err="failed to get container status \"b4951ebf8f2074c07e077ea9f16511d9833beef3ad0c90d218885d2e13705ff1\": rpc error: code = NotFound desc = could not find container \"b4951ebf8f2074c07e077ea9f16511d9833beef3ad0c90d218885d2e13705ff1\": container with ID starting with b4951ebf8f2074c07e077ea9f16511d9833beef3ad0c90d218885d2e13705ff1 not found: ID does not exist"
Dec 13 07:20:12 crc kubenswrapper[5048]: I1213 07:20:12.567708 5048 scope.go:117] "RemoveContainer" containerID="fbbb55828e04cb7da8203e5851a4c6374f3307372c72b9f414bc411afda46023"
Dec 13 07:20:12 crc kubenswrapper[5048]: E1213 07:20:12.568095 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b"
Dec 13 07:20:12 crc kubenswrapper[5048]: I1213 07:20:12.580128 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0675d3da-ae8e-4a43-9b10-9d3314d68465" path="/var/lib/kubelet/pods/0675d3da-ae8e-4a43-9b10-9d3314d68465/volumes"
Dec 13 07:20:23 crc kubenswrapper[5048]: I1213 07:20:23.567808 5048 scope.go:117] "RemoveContainer" containerID="fbbb55828e04cb7da8203e5851a4c6374f3307372c72b9f414bc411afda46023"
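The stretch above is the steady state of a pod stuck in CrashLoopBackOff: on each resync the kubelet logs a "RemoveContainer" for the dead machine-config-daemon container and then an "Error syncing pod, skipping" carrying the back-off message. The kubelet's restart back-off by default doubles from 10s up to the 5m0s cap quoted here, which is why the same pair of lines repeats unchanged for minutes. What follows is a minimal analysis sketch, not part of the log: the file name kubelet.log and the hard-coded year are assumptions, since klog-style timestamps such as "I1213 07:20:23.567808" carry no year of their own.

#!/usr/bin/env python3
# Sketch: extract liveness-probe failures and CrashLoopBackOff skips for
# one pod from a kubelet log shaped like the entries above, and print the
# spacing between consecutive events. Assumed, not taken from the log:
# the input path ("kubelet.log") and the year (2025).
import re
from datetime import datetime

PROBE_RE = re.compile(r'[IE](\d{4} \d{2}:\d{2}:\d{2}\.\d+).*?"Probe failed".*?pod="([^"]+)"')
BACKOFF_RE = re.compile(r'[IE](\d{4} \d{2}:\d{2}:\d{2}\.\d+).*?CrashLoopBackOff.*?pod="([^"]+)"')

def klog_time(stamp, year=2025):
    # klog prefix is MMDD HH:MM:SS.ffffff with no year field.
    return datetime.strptime(f"{year}{stamp}", "%Y%m%d %H:%M:%S.%f")

def scan(path, pod):
    events = []
    with open(path) as fh:
        for line in fh:
            for kind, rx in (("probe-failure", PROBE_RE), ("backoff-skip", BACKOFF_RE)):
                m = rx.search(line)
                if m and m.group(2) == pod:
                    events.append((klog_time(m.group(1)), kind))
    events.sort()
    for (prev, _), (now, kind) in zip(events, events[1:]):
        print(f"{now:%H:%M:%S} {kind:>13} (+{(now - prev).total_seconds():.0f}s)")

if __name__ == "__main__":
    scan("kubelet.log", "openshift-machine-config-operator/machine-config-daemon-j7hns")

Run against the entries above, this would show the three probe failures at 07:18:46, 07:19:16 and 07:19:46, then back-off skips spaced ten-odd seconds apart while the capped five-minute back-off counts down.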
Dec 13 07:20:23 crc kubenswrapper[5048]: E1213 07:20:23.568563 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:20:37 crc kubenswrapper[5048]: I1213 07:20:37.567383 5048 scope.go:117] "RemoveContainer" containerID="fbbb55828e04cb7da8203e5851a4c6374f3307372c72b9f414bc411afda46023" Dec 13 07:20:37 crc kubenswrapper[5048]: E1213 07:20:37.568673 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:20:52 crc kubenswrapper[5048]: I1213 07:20:52.568700 5048 scope.go:117] "RemoveContainer" containerID="fbbb55828e04cb7da8203e5851a4c6374f3307372c72b9f414bc411afda46023" Dec 13 07:20:52 crc kubenswrapper[5048]: E1213 07:20:52.569627 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:21:05 crc kubenswrapper[5048]: I1213 07:21:05.567391 5048 scope.go:117] "RemoveContainer" containerID="fbbb55828e04cb7da8203e5851a4c6374f3307372c72b9f414bc411afda46023" Dec 13 07:21:05 crc kubenswrapper[5048]: E1213 07:21:05.569821 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:21:20 crc kubenswrapper[5048]: I1213 07:21:20.568817 5048 scope.go:117] "RemoveContainer" containerID="fbbb55828e04cb7da8203e5851a4c6374f3307372c72b9f414bc411afda46023" Dec 13 07:21:20 crc kubenswrapper[5048]: E1213 07:21:20.569663 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:21:32 crc kubenswrapper[5048]: I1213 07:21:32.567258 5048 scope.go:117] "RemoveContainer" containerID="fbbb55828e04cb7da8203e5851a4c6374f3307372c72b9f414bc411afda46023" Dec 13 07:21:32 crc kubenswrapper[5048]: E1213 07:21:32.568049 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:21:46 crc kubenswrapper[5048]: I1213 07:21:46.574563 5048 scope.go:117] "RemoveContainer" containerID="fbbb55828e04cb7da8203e5851a4c6374f3307372c72b9f414bc411afda46023" Dec 13 07:21:46 crc kubenswrapper[5048]: E1213 07:21:46.575530 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:22:01 crc kubenswrapper[5048]: I1213 07:22:01.567666 5048 scope.go:117] "RemoveContainer" containerID="fbbb55828e04cb7da8203e5851a4c6374f3307372c72b9f414bc411afda46023" Dec 13 07:22:01 crc kubenswrapper[5048]: E1213 07:22:01.568696 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:22:13 crc kubenswrapper[5048]: I1213 07:22:13.567075 5048 scope.go:117] "RemoveContainer" containerID="fbbb55828e04cb7da8203e5851a4c6374f3307372c72b9f414bc411afda46023" Dec 13 07:22:13 crc kubenswrapper[5048]: E1213 07:22:13.567834 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:22:27 crc kubenswrapper[5048]: I1213 07:22:27.567885 5048 scope.go:117] "RemoveContainer" containerID="fbbb55828e04cb7da8203e5851a4c6374f3307372c72b9f414bc411afda46023" Dec 13 07:22:27 crc kubenswrapper[5048]: E1213 07:22:27.569221 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:22:39 crc kubenswrapper[5048]: I1213 07:22:39.566919 5048 scope.go:117] "RemoveContainer" containerID="fbbb55828e04cb7da8203e5851a4c6374f3307372c72b9f414bc411afda46023" Dec 13 07:22:39 crc kubenswrapper[5048]: E1213 07:22:39.567980 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:22:52 crc kubenswrapper[5048]: I1213 07:22:52.567350 5048 scope.go:117] "RemoveContainer" containerID="fbbb55828e04cb7da8203e5851a4c6374f3307372c72b9f414bc411afda46023" Dec 13 07:22:52 crc kubenswrapper[5048]: E1213 07:22:52.568164 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:22:52 crc kubenswrapper[5048]: I1213 07:22:52.630544 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-v9n5c"] Dec 13 07:22:52 crc kubenswrapper[5048]: E1213 07:22:52.631183 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0675d3da-ae8e-4a43-9b10-9d3314d68465" containerName="extract-content" Dec 13 07:22:52 crc kubenswrapper[5048]: I1213 07:22:52.631197 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="0675d3da-ae8e-4a43-9b10-9d3314d68465" containerName="extract-content" Dec 13 07:22:52 crc kubenswrapper[5048]: E1213 07:22:52.631219 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0675d3da-ae8e-4a43-9b10-9d3314d68465" containerName="registry-server" Dec 13 07:22:52 crc kubenswrapper[5048]: I1213 07:22:52.631224 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="0675d3da-ae8e-4a43-9b10-9d3314d68465" containerName="registry-server" Dec 13 07:22:52 crc kubenswrapper[5048]: E1213 07:22:52.631243 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0675d3da-ae8e-4a43-9b10-9d3314d68465" containerName="extract-utilities" Dec 13 07:22:52 crc kubenswrapper[5048]: I1213 07:22:52.631250 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="0675d3da-ae8e-4a43-9b10-9d3314d68465" containerName="extract-utilities" Dec 13 07:22:52 crc kubenswrapper[5048]: I1213 07:22:52.631423 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="0675d3da-ae8e-4a43-9b10-9d3314d68465" containerName="registry-server" Dec 13 07:22:52 crc kubenswrapper[5048]: I1213 07:22:52.632709 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-v9n5c" Dec 13 07:22:52 crc kubenswrapper[5048]: I1213 07:22:52.639956 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-v9n5c"] Dec 13 07:22:52 crc kubenswrapper[5048]: I1213 07:22:52.700879 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39aadb51-9c02-47dc-a8a7-c325d36473e7-utilities\") pod \"community-operators-v9n5c\" (UID: \"39aadb51-9c02-47dc-a8a7-c325d36473e7\") " pod="openshift-marketplace/community-operators-v9n5c" Dec 13 07:22:52 crc kubenswrapper[5048]: I1213 07:22:52.701036 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39aadb51-9c02-47dc-a8a7-c325d36473e7-catalog-content\") pod \"community-operators-v9n5c\" (UID: \"39aadb51-9c02-47dc-a8a7-c325d36473e7\") " pod="openshift-marketplace/community-operators-v9n5c" Dec 13 07:22:52 crc kubenswrapper[5048]: I1213 07:22:52.701132 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jlz6t\" (UniqueName: \"kubernetes.io/projected/39aadb51-9c02-47dc-a8a7-c325d36473e7-kube-api-access-jlz6t\") pod \"community-operators-v9n5c\" (UID: \"39aadb51-9c02-47dc-a8a7-c325d36473e7\") " pod="openshift-marketplace/community-operators-v9n5c" Dec 13 07:22:52 crc kubenswrapper[5048]: I1213 07:22:52.802995 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39aadb51-9c02-47dc-a8a7-c325d36473e7-utilities\") pod \"community-operators-v9n5c\" (UID: \"39aadb51-9c02-47dc-a8a7-c325d36473e7\") " pod="openshift-marketplace/community-operators-v9n5c" Dec 13 07:22:52 crc kubenswrapper[5048]: I1213 07:22:52.803085 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39aadb51-9c02-47dc-a8a7-c325d36473e7-catalog-content\") pod \"community-operators-v9n5c\" (UID: \"39aadb51-9c02-47dc-a8a7-c325d36473e7\") " pod="openshift-marketplace/community-operators-v9n5c" Dec 13 07:22:52 crc kubenswrapper[5048]: I1213 07:22:52.803135 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jlz6t\" (UniqueName: \"kubernetes.io/projected/39aadb51-9c02-47dc-a8a7-c325d36473e7-kube-api-access-jlz6t\") pod \"community-operators-v9n5c\" (UID: \"39aadb51-9c02-47dc-a8a7-c325d36473e7\") " pod="openshift-marketplace/community-operators-v9n5c" Dec 13 07:22:52 crc kubenswrapper[5048]: I1213 07:22:52.803516 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39aadb51-9c02-47dc-a8a7-c325d36473e7-utilities\") pod \"community-operators-v9n5c\" (UID: \"39aadb51-9c02-47dc-a8a7-c325d36473e7\") " pod="openshift-marketplace/community-operators-v9n5c" Dec 13 07:22:52 crc kubenswrapper[5048]: I1213 07:22:52.803691 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39aadb51-9c02-47dc-a8a7-c325d36473e7-catalog-content\") pod \"community-operators-v9n5c\" (UID: \"39aadb51-9c02-47dc-a8a7-c325d36473e7\") " pod="openshift-marketplace/community-operators-v9n5c" Dec 13 07:22:52 crc kubenswrapper[5048]: I1213 07:22:52.828349 5048 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-jlz6t\" (UniqueName: \"kubernetes.io/projected/39aadb51-9c02-47dc-a8a7-c325d36473e7-kube-api-access-jlz6t\") pod \"community-operators-v9n5c\" (UID: \"39aadb51-9c02-47dc-a8a7-c325d36473e7\") " pod="openshift-marketplace/community-operators-v9n5c" Dec 13 07:22:52 crc kubenswrapper[5048]: I1213 07:22:52.962112 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-v9n5c" Dec 13 07:22:53 crc kubenswrapper[5048]: I1213 07:22:53.215578 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-5bdcd"] Dec 13 07:22:53 crc kubenswrapper[5048]: I1213 07:22:53.217791 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5bdcd" Dec 13 07:22:53 crc kubenswrapper[5048]: I1213 07:22:53.224955 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5bdcd"] Dec 13 07:22:53 crc kubenswrapper[5048]: I1213 07:22:53.311211 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wdsln\" (UniqueName: \"kubernetes.io/projected/c17760c9-aea4-4fb2-9dd4-2e4bf7569738-kube-api-access-wdsln\") pod \"certified-operators-5bdcd\" (UID: \"c17760c9-aea4-4fb2-9dd4-2e4bf7569738\") " pod="openshift-marketplace/certified-operators-5bdcd" Dec 13 07:22:53 crc kubenswrapper[5048]: I1213 07:22:53.311290 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c17760c9-aea4-4fb2-9dd4-2e4bf7569738-catalog-content\") pod \"certified-operators-5bdcd\" (UID: \"c17760c9-aea4-4fb2-9dd4-2e4bf7569738\") " pod="openshift-marketplace/certified-operators-5bdcd" Dec 13 07:22:53 crc kubenswrapper[5048]: I1213 07:22:53.311347 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c17760c9-aea4-4fb2-9dd4-2e4bf7569738-utilities\") pod \"certified-operators-5bdcd\" (UID: \"c17760c9-aea4-4fb2-9dd4-2e4bf7569738\") " pod="openshift-marketplace/certified-operators-5bdcd" Dec 13 07:22:53 crc kubenswrapper[5048]: I1213 07:22:53.413666 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wdsln\" (UniqueName: \"kubernetes.io/projected/c17760c9-aea4-4fb2-9dd4-2e4bf7569738-kube-api-access-wdsln\") pod \"certified-operators-5bdcd\" (UID: \"c17760c9-aea4-4fb2-9dd4-2e4bf7569738\") " pod="openshift-marketplace/certified-operators-5bdcd" Dec 13 07:22:53 crc kubenswrapper[5048]: I1213 07:22:53.413750 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c17760c9-aea4-4fb2-9dd4-2e4bf7569738-catalog-content\") pod \"certified-operators-5bdcd\" (UID: \"c17760c9-aea4-4fb2-9dd4-2e4bf7569738\") " pod="openshift-marketplace/certified-operators-5bdcd" Dec 13 07:22:53 crc kubenswrapper[5048]: I1213 07:22:53.413800 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c17760c9-aea4-4fb2-9dd4-2e4bf7569738-utilities\") pod \"certified-operators-5bdcd\" (UID: \"c17760c9-aea4-4fb2-9dd4-2e4bf7569738\") " pod="openshift-marketplace/certified-operators-5bdcd" Dec 13 07:22:53 crc kubenswrapper[5048]: I1213 07:22:53.414352 5048 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c17760c9-aea4-4fb2-9dd4-2e4bf7569738-catalog-content\") pod \"certified-operators-5bdcd\" (UID: \"c17760c9-aea4-4fb2-9dd4-2e4bf7569738\") " pod="openshift-marketplace/certified-operators-5bdcd" Dec 13 07:22:53 crc kubenswrapper[5048]: I1213 07:22:53.414525 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c17760c9-aea4-4fb2-9dd4-2e4bf7569738-utilities\") pod \"certified-operators-5bdcd\" (UID: \"c17760c9-aea4-4fb2-9dd4-2e4bf7569738\") " pod="openshift-marketplace/certified-operators-5bdcd" Dec 13 07:22:53 crc kubenswrapper[5048]: I1213 07:22:53.432562 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wdsln\" (UniqueName: \"kubernetes.io/projected/c17760c9-aea4-4fb2-9dd4-2e4bf7569738-kube-api-access-wdsln\") pod \"certified-operators-5bdcd\" (UID: \"c17760c9-aea4-4fb2-9dd4-2e4bf7569738\") " pod="openshift-marketplace/certified-operators-5bdcd" Dec 13 07:22:53 crc kubenswrapper[5048]: I1213 07:22:53.506796 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-v9n5c"] Dec 13 07:22:53 crc kubenswrapper[5048]: W1213 07:22:53.518768 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod39aadb51_9c02_47dc_a8a7_c325d36473e7.slice/crio-e3045b0f8e3bec0e392bfca361f07e3fa014fb504017b0e51e0ba5bcf890d3a9 WatchSource:0}: Error finding container e3045b0f8e3bec0e392bfca361f07e3fa014fb504017b0e51e0ba5bcf890d3a9: Status 404 returned error can't find the container with id e3045b0f8e3bec0e392bfca361f07e3fa014fb504017b0e51e0ba5bcf890d3a9 Dec 13 07:22:53 crc kubenswrapper[5048]: I1213 07:22:53.543500 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5bdcd" Dec 13 07:22:53 crc kubenswrapper[5048]: I1213 07:22:53.846148 5048 generic.go:334] "Generic (PLEG): container finished" podID="39aadb51-9c02-47dc-a8a7-c325d36473e7" containerID="4beb31375bc85166b5b9fee561ad7c57f9c696b0e1f6df1f17ae7988dd94c450" exitCode=0 Dec 13 07:22:53 crc kubenswrapper[5048]: I1213 07:22:53.846488 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v9n5c" event={"ID":"39aadb51-9c02-47dc-a8a7-c325d36473e7","Type":"ContainerDied","Data":"4beb31375bc85166b5b9fee561ad7c57f9c696b0e1f6df1f17ae7988dd94c450"} Dec 13 07:22:53 crc kubenswrapper[5048]: I1213 07:22:53.846528 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v9n5c" event={"ID":"39aadb51-9c02-47dc-a8a7-c325d36473e7","Type":"ContainerStarted","Data":"e3045b0f8e3bec0e392bfca361f07e3fa014fb504017b0e51e0ba5bcf890d3a9"} Dec 13 07:22:53 crc kubenswrapper[5048]: I1213 07:22:53.851883 5048 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 13 07:22:54 crc kubenswrapper[5048]: I1213 07:22:54.145823 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5bdcd"] Dec 13 07:22:54 crc kubenswrapper[5048]: I1213 07:22:54.861036 5048 generic.go:334] "Generic (PLEG): container finished" podID="c17760c9-aea4-4fb2-9dd4-2e4bf7569738" containerID="31aec63449cd7f8a0122aea0415b2f0d244d3bfaed56c7e6c743f22bd571805c" exitCode=0 Dec 13 07:22:54 crc kubenswrapper[5048]: I1213 07:22:54.861109 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5bdcd" event={"ID":"c17760c9-aea4-4fb2-9dd4-2e4bf7569738","Type":"ContainerDied","Data":"31aec63449cd7f8a0122aea0415b2f0d244d3bfaed56c7e6c743f22bd571805c"} Dec 13 07:22:54 crc kubenswrapper[5048]: I1213 07:22:54.861695 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5bdcd" event={"ID":"c17760c9-aea4-4fb2-9dd4-2e4bf7569738","Type":"ContainerStarted","Data":"00efb331218624222602fa238a6055953729405c7c7b8d54eff56d07c4bfe810"} Dec 13 07:22:54 crc kubenswrapper[5048]: I1213 07:22:54.864357 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v9n5c" event={"ID":"39aadb51-9c02-47dc-a8a7-c325d36473e7","Type":"ContainerStarted","Data":"589beb37bda24d6c3810785eab1b891e2a974b9ec47e7746e793d31f86e694f1"} Dec 13 07:22:55 crc kubenswrapper[5048]: I1213 07:22:55.874832 5048 generic.go:334] "Generic (PLEG): container finished" podID="39aadb51-9c02-47dc-a8a7-c325d36473e7" containerID="589beb37bda24d6c3810785eab1b891e2a974b9ec47e7746e793d31f86e694f1" exitCode=0 Dec 13 07:22:55 crc kubenswrapper[5048]: I1213 07:22:55.875051 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v9n5c" event={"ID":"39aadb51-9c02-47dc-a8a7-c325d36473e7","Type":"ContainerDied","Data":"589beb37bda24d6c3810785eab1b891e2a974b9ec47e7746e793d31f86e694f1"} Dec 13 07:22:56 crc kubenswrapper[5048]: I1213 07:22:56.890321 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5bdcd" event={"ID":"c17760c9-aea4-4fb2-9dd4-2e4bf7569738","Type":"ContainerStarted","Data":"167391660ad5d1cf85e1ef41207ac2f49ad9e1d019f19918f814e8572fbc8af1"} Dec 13 07:22:57 crc kubenswrapper[5048]: I1213 07:22:57.907221 5048 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openshift-marketplace/community-operators-v9n5c" event={"ID":"39aadb51-9c02-47dc-a8a7-c325d36473e7","Type":"ContainerStarted","Data":"566910a2e2c4b6cb02e88198e1ccb88f3a30de0f1a148ac521006d975e5d44fa"} Dec 13 07:22:57 crc kubenswrapper[5048]: I1213 07:22:57.910330 5048 generic.go:334] "Generic (PLEG): container finished" podID="c17760c9-aea4-4fb2-9dd4-2e4bf7569738" containerID="167391660ad5d1cf85e1ef41207ac2f49ad9e1d019f19918f814e8572fbc8af1" exitCode=0 Dec 13 07:22:57 crc kubenswrapper[5048]: I1213 07:22:57.910378 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5bdcd" event={"ID":"c17760c9-aea4-4fb2-9dd4-2e4bf7569738","Type":"ContainerDied","Data":"167391660ad5d1cf85e1ef41207ac2f49ad9e1d019f19918f814e8572fbc8af1"} Dec 13 07:22:57 crc kubenswrapper[5048]: I1213 07:22:57.939599 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-v9n5c" podStartSLOduration=3.344285259 podStartE2EDuration="5.939559696s" podCreationTimestamp="2025-12-13 07:22:52 +0000 UTC" firstStartedPulling="2025-12-13 07:22:53.851584622 +0000 UTC m=+3207.718179203" lastFinishedPulling="2025-12-13 07:22:56.446859059 +0000 UTC m=+3210.313453640" observedRunningTime="2025-12-13 07:22:57.925896277 +0000 UTC m=+3211.792490908" watchObservedRunningTime="2025-12-13 07:22:57.939559696 +0000 UTC m=+3211.806154317" Dec 13 07:22:59 crc kubenswrapper[5048]: I1213 07:22:59.936320 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5bdcd" event={"ID":"c17760c9-aea4-4fb2-9dd4-2e4bf7569738","Type":"ContainerStarted","Data":"eb5e83839d5e9b2ee902a9035fba92407eef2d4a81c630e60525ecab2ec7bce7"} Dec 13 07:22:59 crc kubenswrapper[5048]: I1213 07:22:59.962476 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-5bdcd" podStartSLOduration=2.631720539 podStartE2EDuration="6.962451068s" podCreationTimestamp="2025-12-13 07:22:53 +0000 UTC" firstStartedPulling="2025-12-13 07:22:54.863426784 +0000 UTC m=+3208.730021365" lastFinishedPulling="2025-12-13 07:22:59.194157323 +0000 UTC m=+3213.060751894" observedRunningTime="2025-12-13 07:22:59.951599666 +0000 UTC m=+3213.818194257" watchObservedRunningTime="2025-12-13 07:22:59.962451068 +0000 UTC m=+3213.829045649" Dec 13 07:23:02 crc kubenswrapper[5048]: I1213 07:23:02.962975 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-v9n5c" Dec 13 07:23:02 crc kubenswrapper[5048]: I1213 07:23:02.963476 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-v9n5c" Dec 13 07:23:03 crc kubenswrapper[5048]: I1213 07:23:03.014632 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-v9n5c" Dec 13 07:23:03 crc kubenswrapper[5048]: I1213 07:23:03.544394 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-5bdcd" Dec 13 07:23:03 crc kubenswrapper[5048]: I1213 07:23:03.544440 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-5bdcd" Dec 13 07:23:03 crc kubenswrapper[5048]: I1213 07:23:03.598071 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-5bdcd" Dec 13 07:23:04 crc 
kubenswrapper[5048]: I1213 07:23:04.019340 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-v9n5c" Dec 13 07:23:05 crc kubenswrapper[5048]: I1213 07:23:05.567982 5048 scope.go:117] "RemoveContainer" containerID="fbbb55828e04cb7da8203e5851a4c6374f3307372c72b9f414bc411afda46023" Dec 13 07:23:05 crc kubenswrapper[5048]: E1213 07:23:05.568707 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:23:05 crc kubenswrapper[5048]: I1213 07:23:05.606478 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-v9n5c"] Dec 13 07:23:06 crc kubenswrapper[5048]: I1213 07:23:06.012629 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-v9n5c" podUID="39aadb51-9c02-47dc-a8a7-c325d36473e7" containerName="registry-server" containerID="cri-o://566910a2e2c4b6cb02e88198e1ccb88f3a30de0f1a148ac521006d975e5d44fa" gracePeriod=2 Dec 13 07:23:06 crc kubenswrapper[5048]: I1213 07:23:06.470302 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-v9n5c" Dec 13 07:23:06 crc kubenswrapper[5048]: I1213 07:23:06.600996 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jlz6t\" (UniqueName: \"kubernetes.io/projected/39aadb51-9c02-47dc-a8a7-c325d36473e7-kube-api-access-jlz6t\") pod \"39aadb51-9c02-47dc-a8a7-c325d36473e7\" (UID: \"39aadb51-9c02-47dc-a8a7-c325d36473e7\") " Dec 13 07:23:06 crc kubenswrapper[5048]: I1213 07:23:06.601156 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39aadb51-9c02-47dc-a8a7-c325d36473e7-utilities\") pod \"39aadb51-9c02-47dc-a8a7-c325d36473e7\" (UID: \"39aadb51-9c02-47dc-a8a7-c325d36473e7\") " Dec 13 07:23:06 crc kubenswrapper[5048]: I1213 07:23:06.601207 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39aadb51-9c02-47dc-a8a7-c325d36473e7-catalog-content\") pod \"39aadb51-9c02-47dc-a8a7-c325d36473e7\" (UID: \"39aadb51-9c02-47dc-a8a7-c325d36473e7\") " Dec 13 07:23:06 crc kubenswrapper[5048]: I1213 07:23:06.603810 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/39aadb51-9c02-47dc-a8a7-c325d36473e7-utilities" (OuterVolumeSpecName: "utilities") pod "39aadb51-9c02-47dc-a8a7-c325d36473e7" (UID: "39aadb51-9c02-47dc-a8a7-c325d36473e7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 07:23:06 crc kubenswrapper[5048]: I1213 07:23:06.618911 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39aadb51-9c02-47dc-a8a7-c325d36473e7-kube-api-access-jlz6t" (OuterVolumeSpecName: "kube-api-access-jlz6t") pod "39aadb51-9c02-47dc-a8a7-c325d36473e7" (UID: "39aadb51-9c02-47dc-a8a7-c325d36473e7"). InnerVolumeSpecName "kube-api-access-jlz6t". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 07:23:06 crc kubenswrapper[5048]: I1213 07:23:06.651893 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/39aadb51-9c02-47dc-a8a7-c325d36473e7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "39aadb51-9c02-47dc-a8a7-c325d36473e7" (UID: "39aadb51-9c02-47dc-a8a7-c325d36473e7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 07:23:06 crc kubenswrapper[5048]: I1213 07:23:06.703823 5048 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39aadb51-9c02-47dc-a8a7-c325d36473e7-utilities\") on node \"crc\" DevicePath \"\"" Dec 13 07:23:06 crc kubenswrapper[5048]: I1213 07:23:06.703903 5048 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39aadb51-9c02-47dc-a8a7-c325d36473e7-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 13 07:23:06 crc kubenswrapper[5048]: I1213 07:23:06.703925 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jlz6t\" (UniqueName: \"kubernetes.io/projected/39aadb51-9c02-47dc-a8a7-c325d36473e7-kube-api-access-jlz6t\") on node \"crc\" DevicePath \"\"" Dec 13 07:23:07 crc kubenswrapper[5048]: I1213 07:23:07.025429 5048 generic.go:334] "Generic (PLEG): container finished" podID="39aadb51-9c02-47dc-a8a7-c325d36473e7" containerID="566910a2e2c4b6cb02e88198e1ccb88f3a30de0f1a148ac521006d975e5d44fa" exitCode=0 Dec 13 07:23:07 crc kubenswrapper[5048]: I1213 07:23:07.025524 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-v9n5c" Dec 13 07:23:07 crc kubenswrapper[5048]: I1213 07:23:07.025528 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v9n5c" event={"ID":"39aadb51-9c02-47dc-a8a7-c325d36473e7","Type":"ContainerDied","Data":"566910a2e2c4b6cb02e88198e1ccb88f3a30de0f1a148ac521006d975e5d44fa"} Dec 13 07:23:07 crc kubenswrapper[5048]: I1213 07:23:07.025636 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v9n5c" event={"ID":"39aadb51-9c02-47dc-a8a7-c325d36473e7","Type":"ContainerDied","Data":"e3045b0f8e3bec0e392bfca361f07e3fa014fb504017b0e51e0ba5bcf890d3a9"} Dec 13 07:23:07 crc kubenswrapper[5048]: I1213 07:23:07.025685 5048 scope.go:117] "RemoveContainer" containerID="566910a2e2c4b6cb02e88198e1ccb88f3a30de0f1a148ac521006d975e5d44fa" Dec 13 07:23:07 crc kubenswrapper[5048]: I1213 07:23:07.057833 5048 scope.go:117] "RemoveContainer" containerID="589beb37bda24d6c3810785eab1b891e2a974b9ec47e7746e793d31f86e694f1" Dec 13 07:23:07 crc kubenswrapper[5048]: I1213 07:23:07.082665 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-v9n5c"] Dec 13 07:23:07 crc kubenswrapper[5048]: I1213 07:23:07.094249 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-v9n5c"] Dec 13 07:23:07 crc kubenswrapper[5048]: I1213 07:23:07.099287 5048 scope.go:117] "RemoveContainer" containerID="4beb31375bc85166b5b9fee561ad7c57f9c696b0e1f6df1f17ae7988dd94c450" Dec 13 07:23:07 crc kubenswrapper[5048]: I1213 07:23:07.132987 5048 scope.go:117] "RemoveContainer" containerID="566910a2e2c4b6cb02e88198e1ccb88f3a30de0f1a148ac521006d975e5d44fa" Dec 13 07:23:07 crc kubenswrapper[5048]: E1213 07:23:07.133605 5048 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"566910a2e2c4b6cb02e88198e1ccb88f3a30de0f1a148ac521006d975e5d44fa\": container with ID starting with 566910a2e2c4b6cb02e88198e1ccb88f3a30de0f1a148ac521006d975e5d44fa not found: ID does not exist" containerID="566910a2e2c4b6cb02e88198e1ccb88f3a30de0f1a148ac521006d975e5d44fa" Dec 13 07:23:07 crc kubenswrapper[5048]: I1213 07:23:07.133670 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"566910a2e2c4b6cb02e88198e1ccb88f3a30de0f1a148ac521006d975e5d44fa"} err="failed to get container status \"566910a2e2c4b6cb02e88198e1ccb88f3a30de0f1a148ac521006d975e5d44fa\": rpc error: code = NotFound desc = could not find container \"566910a2e2c4b6cb02e88198e1ccb88f3a30de0f1a148ac521006d975e5d44fa\": container with ID starting with 566910a2e2c4b6cb02e88198e1ccb88f3a30de0f1a148ac521006d975e5d44fa not found: ID does not exist" Dec 13 07:23:07 crc kubenswrapper[5048]: I1213 07:23:07.133693 5048 scope.go:117] "RemoveContainer" containerID="589beb37bda24d6c3810785eab1b891e2a974b9ec47e7746e793d31f86e694f1" Dec 13 07:23:07 crc kubenswrapper[5048]: E1213 07:23:07.134245 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"589beb37bda24d6c3810785eab1b891e2a974b9ec47e7746e793d31f86e694f1\": container with ID starting with 589beb37bda24d6c3810785eab1b891e2a974b9ec47e7746e793d31f86e694f1 not found: ID does not exist" containerID="589beb37bda24d6c3810785eab1b891e2a974b9ec47e7746e793d31f86e694f1" Dec 13 07:23:07 crc kubenswrapper[5048]: I1213 07:23:07.134304 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"589beb37bda24d6c3810785eab1b891e2a974b9ec47e7746e793d31f86e694f1"} err="failed to get container status \"589beb37bda24d6c3810785eab1b891e2a974b9ec47e7746e793d31f86e694f1\": rpc error: code = NotFound desc = could not find container \"589beb37bda24d6c3810785eab1b891e2a974b9ec47e7746e793d31f86e694f1\": container with ID starting with 589beb37bda24d6c3810785eab1b891e2a974b9ec47e7746e793d31f86e694f1 not found: ID does not exist" Dec 13 07:23:07 crc kubenswrapper[5048]: I1213 07:23:07.134342 5048 scope.go:117] "RemoveContainer" containerID="4beb31375bc85166b5b9fee561ad7c57f9c696b0e1f6df1f17ae7988dd94c450" Dec 13 07:23:07 crc kubenswrapper[5048]: E1213 07:23:07.134798 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4beb31375bc85166b5b9fee561ad7c57f9c696b0e1f6df1f17ae7988dd94c450\": container with ID starting with 4beb31375bc85166b5b9fee561ad7c57f9c696b0e1f6df1f17ae7988dd94c450 not found: ID does not exist" containerID="4beb31375bc85166b5b9fee561ad7c57f9c696b0e1f6df1f17ae7988dd94c450" Dec 13 07:23:07 crc kubenswrapper[5048]: I1213 07:23:07.134829 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4beb31375bc85166b5b9fee561ad7c57f9c696b0e1f6df1f17ae7988dd94c450"} err="failed to get container status \"4beb31375bc85166b5b9fee561ad7c57f9c696b0e1f6df1f17ae7988dd94c450\": rpc error: code = NotFound desc = could not find container \"4beb31375bc85166b5b9fee561ad7c57f9c696b0e1f6df1f17ae7988dd94c450\": container with ID starting with 4beb31375bc85166b5b9fee561ad7c57f9c696b0e1f6df1f17ae7988dd94c450 not found: ID does not exist" Dec 13 07:23:08 crc kubenswrapper[5048]: I1213 07:23:08.582894 5048 kubelet_volumes.go:163] "Cleaned 
up orphaned pod volumes dir" podUID="39aadb51-9c02-47dc-a8a7-c325d36473e7" path="/var/lib/kubelet/pods/39aadb51-9c02-47dc-a8a7-c325d36473e7/volumes" Dec 13 07:23:13 crc kubenswrapper[5048]: I1213 07:23:13.598321 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-5bdcd" Dec 13 07:23:14 crc kubenswrapper[5048]: I1213 07:23:14.209046 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5bdcd"] Dec 13 07:23:14 crc kubenswrapper[5048]: I1213 07:23:14.209330 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-5bdcd" podUID="c17760c9-aea4-4fb2-9dd4-2e4bf7569738" containerName="registry-server" containerID="cri-o://eb5e83839d5e9b2ee902a9035fba92407eef2d4a81c630e60525ecab2ec7bce7" gracePeriod=2 Dec 13 07:23:14 crc kubenswrapper[5048]: I1213 07:23:14.822040 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5bdcd" Dec 13 07:23:15 crc kubenswrapper[5048]: I1213 07:23:15.019714 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c17760c9-aea4-4fb2-9dd4-2e4bf7569738-utilities\") pod \"c17760c9-aea4-4fb2-9dd4-2e4bf7569738\" (UID: \"c17760c9-aea4-4fb2-9dd4-2e4bf7569738\") " Dec 13 07:23:15 crc kubenswrapper[5048]: I1213 07:23:15.019839 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wdsln\" (UniqueName: \"kubernetes.io/projected/c17760c9-aea4-4fb2-9dd4-2e4bf7569738-kube-api-access-wdsln\") pod \"c17760c9-aea4-4fb2-9dd4-2e4bf7569738\" (UID: \"c17760c9-aea4-4fb2-9dd4-2e4bf7569738\") " Dec 13 07:23:15 crc kubenswrapper[5048]: I1213 07:23:15.019971 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c17760c9-aea4-4fb2-9dd4-2e4bf7569738-catalog-content\") pod \"c17760c9-aea4-4fb2-9dd4-2e4bf7569738\" (UID: \"c17760c9-aea4-4fb2-9dd4-2e4bf7569738\") " Dec 13 07:23:15 crc kubenswrapper[5048]: I1213 07:23:15.020646 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c17760c9-aea4-4fb2-9dd4-2e4bf7569738-utilities" (OuterVolumeSpecName: "utilities") pod "c17760c9-aea4-4fb2-9dd4-2e4bf7569738" (UID: "c17760c9-aea4-4fb2-9dd4-2e4bf7569738"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 07:23:15 crc kubenswrapper[5048]: I1213 07:23:15.031670 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c17760c9-aea4-4fb2-9dd4-2e4bf7569738-kube-api-access-wdsln" (OuterVolumeSpecName: "kube-api-access-wdsln") pod "c17760c9-aea4-4fb2-9dd4-2e4bf7569738" (UID: "c17760c9-aea4-4fb2-9dd4-2e4bf7569738"). InnerVolumeSpecName "kube-api-access-wdsln". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 07:23:15 crc kubenswrapper[5048]: I1213 07:23:15.073739 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c17760c9-aea4-4fb2-9dd4-2e4bf7569738-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c17760c9-aea4-4fb2-9dd4-2e4bf7569738" (UID: "c17760c9-aea4-4fb2-9dd4-2e4bf7569738"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 07:23:15 crc kubenswrapper[5048]: I1213 07:23:15.098501 5048 generic.go:334] "Generic (PLEG): container finished" podID="c17760c9-aea4-4fb2-9dd4-2e4bf7569738" containerID="eb5e83839d5e9b2ee902a9035fba92407eef2d4a81c630e60525ecab2ec7bce7" exitCode=0 Dec 13 07:23:15 crc kubenswrapper[5048]: I1213 07:23:15.098552 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5bdcd" event={"ID":"c17760c9-aea4-4fb2-9dd4-2e4bf7569738","Type":"ContainerDied","Data":"eb5e83839d5e9b2ee902a9035fba92407eef2d4a81c630e60525ecab2ec7bce7"} Dec 13 07:23:15 crc kubenswrapper[5048]: I1213 07:23:15.098557 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5bdcd" Dec 13 07:23:15 crc kubenswrapper[5048]: I1213 07:23:15.098582 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5bdcd" event={"ID":"c17760c9-aea4-4fb2-9dd4-2e4bf7569738","Type":"ContainerDied","Data":"00efb331218624222602fa238a6055953729405c7c7b8d54eff56d07c4bfe810"} Dec 13 07:23:15 crc kubenswrapper[5048]: I1213 07:23:15.098602 5048 scope.go:117] "RemoveContainer" containerID="eb5e83839d5e9b2ee902a9035fba92407eef2d4a81c630e60525ecab2ec7bce7" Dec 13 07:23:15 crc kubenswrapper[5048]: I1213 07:23:15.122186 5048 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c17760c9-aea4-4fb2-9dd4-2e4bf7569738-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 13 07:23:15 crc kubenswrapper[5048]: I1213 07:23:15.122404 5048 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c17760c9-aea4-4fb2-9dd4-2e4bf7569738-utilities\") on node \"crc\" DevicePath \"\"" Dec 13 07:23:15 crc kubenswrapper[5048]: I1213 07:23:15.122416 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wdsln\" (UniqueName: \"kubernetes.io/projected/c17760c9-aea4-4fb2-9dd4-2e4bf7569738-kube-api-access-wdsln\") on node \"crc\" DevicePath \"\"" Dec 13 07:23:15 crc kubenswrapper[5048]: I1213 07:23:15.124182 5048 scope.go:117] "RemoveContainer" containerID="167391660ad5d1cf85e1ef41207ac2f49ad9e1d019f19918f814e8572fbc8af1" Dec 13 07:23:15 crc kubenswrapper[5048]: I1213 07:23:15.141591 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5bdcd"] Dec 13 07:23:15 crc kubenswrapper[5048]: I1213 07:23:15.150977 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-5bdcd"] Dec 13 07:23:15 crc kubenswrapper[5048]: I1213 07:23:15.165591 5048 scope.go:117] "RemoveContainer" containerID="31aec63449cd7f8a0122aea0415b2f0d244d3bfaed56c7e6c743f22bd571805c" Dec 13 07:23:15 crc kubenswrapper[5048]: I1213 07:23:15.193346 5048 scope.go:117] "RemoveContainer" containerID="eb5e83839d5e9b2ee902a9035fba92407eef2d4a81c630e60525ecab2ec7bce7" Dec 13 07:23:15 crc kubenswrapper[5048]: E1213 07:23:15.193709 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb5e83839d5e9b2ee902a9035fba92407eef2d4a81c630e60525ecab2ec7bce7\": container with ID starting with eb5e83839d5e9b2ee902a9035fba92407eef2d4a81c630e60525ecab2ec7bce7 not found: ID does not exist" containerID="eb5e83839d5e9b2ee902a9035fba92407eef2d4a81c630e60525ecab2ec7bce7" Dec 13 07:23:15 crc kubenswrapper[5048]: I1213 07:23:15.193750 
5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb5e83839d5e9b2ee902a9035fba92407eef2d4a81c630e60525ecab2ec7bce7"} err="failed to get container status \"eb5e83839d5e9b2ee902a9035fba92407eef2d4a81c630e60525ecab2ec7bce7\": rpc error: code = NotFound desc = could not find container \"eb5e83839d5e9b2ee902a9035fba92407eef2d4a81c630e60525ecab2ec7bce7\": container with ID starting with eb5e83839d5e9b2ee902a9035fba92407eef2d4a81c630e60525ecab2ec7bce7 not found: ID does not exist" Dec 13 07:23:15 crc kubenswrapper[5048]: I1213 07:23:15.193774 5048 scope.go:117] "RemoveContainer" containerID="167391660ad5d1cf85e1ef41207ac2f49ad9e1d019f19918f814e8572fbc8af1" Dec 13 07:23:15 crc kubenswrapper[5048]: E1213 07:23:15.194338 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"167391660ad5d1cf85e1ef41207ac2f49ad9e1d019f19918f814e8572fbc8af1\": container with ID starting with 167391660ad5d1cf85e1ef41207ac2f49ad9e1d019f19918f814e8572fbc8af1 not found: ID does not exist" containerID="167391660ad5d1cf85e1ef41207ac2f49ad9e1d019f19918f814e8572fbc8af1" Dec 13 07:23:15 crc kubenswrapper[5048]: I1213 07:23:15.194381 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"167391660ad5d1cf85e1ef41207ac2f49ad9e1d019f19918f814e8572fbc8af1"} err="failed to get container status \"167391660ad5d1cf85e1ef41207ac2f49ad9e1d019f19918f814e8572fbc8af1\": rpc error: code = NotFound desc = could not find container \"167391660ad5d1cf85e1ef41207ac2f49ad9e1d019f19918f814e8572fbc8af1\": container with ID starting with 167391660ad5d1cf85e1ef41207ac2f49ad9e1d019f19918f814e8572fbc8af1 not found: ID does not exist" Dec 13 07:23:15 crc kubenswrapper[5048]: I1213 07:23:15.194410 5048 scope.go:117] "RemoveContainer" containerID="31aec63449cd7f8a0122aea0415b2f0d244d3bfaed56c7e6c743f22bd571805c" Dec 13 07:23:15 crc kubenswrapper[5048]: E1213 07:23:15.194731 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"31aec63449cd7f8a0122aea0415b2f0d244d3bfaed56c7e6c743f22bd571805c\": container with ID starting with 31aec63449cd7f8a0122aea0415b2f0d244d3bfaed56c7e6c743f22bd571805c not found: ID does not exist" containerID="31aec63449cd7f8a0122aea0415b2f0d244d3bfaed56c7e6c743f22bd571805c" Dec 13 07:23:15 crc kubenswrapper[5048]: I1213 07:23:15.194750 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"31aec63449cd7f8a0122aea0415b2f0d244d3bfaed56c7e6c743f22bd571805c"} err="failed to get container status \"31aec63449cd7f8a0122aea0415b2f0d244d3bfaed56c7e6c743f22bd571805c\": rpc error: code = NotFound desc = could not find container \"31aec63449cd7f8a0122aea0415b2f0d244d3bfaed56c7e6c743f22bd571805c\": container with ID starting with 31aec63449cd7f8a0122aea0415b2f0d244d3bfaed56c7e6c743f22bd571805c not found: ID does not exist" Dec 13 07:23:16 crc kubenswrapper[5048]: I1213 07:23:16.579988 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c17760c9-aea4-4fb2-9dd4-2e4bf7569738" path="/var/lib/kubelet/pods/c17760c9-aea4-4fb2-9dd4-2e4bf7569738/volumes" Dec 13 07:23:17 crc kubenswrapper[5048]: I1213 07:23:17.566414 5048 scope.go:117] "RemoveContainer" containerID="fbbb55828e04cb7da8203e5851a4c6374f3307372c72b9f414bc411afda46023" Dec 13 07:23:17 crc kubenswrapper[5048]: E1213 07:23:17.566959 5048 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:23:30 crc kubenswrapper[5048]: I1213 07:23:30.567668 5048 scope.go:117] "RemoveContainer" containerID="fbbb55828e04cb7da8203e5851a4c6374f3307372c72b9f414bc411afda46023" Dec 13 07:23:30 crc kubenswrapper[5048]: E1213 07:23:30.568702 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:23:45 crc kubenswrapper[5048]: I1213 07:23:45.567290 5048 scope.go:117] "RemoveContainer" containerID="fbbb55828e04cb7da8203e5851a4c6374f3307372c72b9f414bc411afda46023" Dec 13 07:23:45 crc kubenswrapper[5048]: E1213 07:23:45.568234 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:24:00 crc kubenswrapper[5048]: I1213 07:24:00.567380 5048 scope.go:117] "RemoveContainer" containerID="fbbb55828e04cb7da8203e5851a4c6374f3307372c72b9f414bc411afda46023" Dec 13 07:24:00 crc kubenswrapper[5048]: E1213 07:24:00.568066 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:24:15 crc kubenswrapper[5048]: I1213 07:24:15.567989 5048 scope.go:117] "RemoveContainer" containerID="fbbb55828e04cb7da8203e5851a4c6374f3307372c72b9f414bc411afda46023" Dec 13 07:24:15 crc kubenswrapper[5048]: E1213 07:24:15.568725 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:24:29 crc kubenswrapper[5048]: I1213 07:24:29.567750 5048 scope.go:117] "RemoveContainer" containerID="fbbb55828e04cb7da8203e5851a4c6374f3307372c72b9f414bc411afda46023" Dec 13 07:24:29 crc kubenswrapper[5048]: E1213 07:24:29.568574 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:24:43 crc kubenswrapper[5048]: I1213 07:24:43.566802 5048 scope.go:117] "RemoveContainer" containerID="fbbb55828e04cb7da8203e5851a4c6374f3307372c72b9f414bc411afda46023" Dec 13 07:24:43 crc kubenswrapper[5048]: E1213 07:24:43.567791 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:24:54 crc kubenswrapper[5048]: I1213 07:24:54.566747 5048 scope.go:117] "RemoveContainer" containerID="fbbb55828e04cb7da8203e5851a4c6374f3307372c72b9f414bc411afda46023" Dec 13 07:24:55 crc kubenswrapper[5048]: I1213 07:24:55.092176 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" event={"ID":"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b","Type":"ContainerStarted","Data":"305e2d72c42c0e511700afcae549613ac74af39a3f7fabb14c595f37c385a74b"} Dec 13 07:27:16 crc kubenswrapper[5048]: I1213 07:27:16.215347 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 13 07:27:16 crc kubenswrapper[5048]: I1213 07:27:16.215942 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 13 07:27:46 crc kubenswrapper[5048]: I1213 07:27:46.215990 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 13 07:27:46 crc kubenswrapper[5048]: I1213 07:27:46.216611 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 13 07:27:55 crc kubenswrapper[5048]: I1213 07:27:55.268211 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-lthkc"] Dec 13 07:27:55 crc kubenswrapper[5048]: E1213 07:27:55.269722 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39aadb51-9c02-47dc-a8a7-c325d36473e7" containerName="extract-content" Dec 13 07:27:55 crc kubenswrapper[5048]: I1213 07:27:55.269753 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="39aadb51-9c02-47dc-a8a7-c325d36473e7" containerName="extract-content" Dec 13 07:27:55 crc kubenswrapper[5048]: E1213 07:27:55.269791 5048 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c17760c9-aea4-4fb2-9dd4-2e4bf7569738" containerName="registry-server" Dec 13 07:27:55 crc kubenswrapper[5048]: I1213 07:27:55.269808 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="c17760c9-aea4-4fb2-9dd4-2e4bf7569738" containerName="registry-server" Dec 13 07:27:55 crc kubenswrapper[5048]: E1213 07:27:55.269855 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c17760c9-aea4-4fb2-9dd4-2e4bf7569738" containerName="extract-content" Dec 13 07:27:55 crc kubenswrapper[5048]: I1213 07:27:55.269872 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="c17760c9-aea4-4fb2-9dd4-2e4bf7569738" containerName="extract-content" Dec 13 07:27:55 crc kubenswrapper[5048]: E1213 07:27:55.269894 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39aadb51-9c02-47dc-a8a7-c325d36473e7" containerName="registry-server" Dec 13 07:27:55 crc kubenswrapper[5048]: I1213 07:27:55.269910 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="39aadb51-9c02-47dc-a8a7-c325d36473e7" containerName="registry-server" Dec 13 07:27:55 crc kubenswrapper[5048]: E1213 07:27:55.269940 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c17760c9-aea4-4fb2-9dd4-2e4bf7569738" containerName="extract-utilities" Dec 13 07:27:55 crc kubenswrapper[5048]: I1213 07:27:55.269956 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="c17760c9-aea4-4fb2-9dd4-2e4bf7569738" containerName="extract-utilities" Dec 13 07:27:55 crc kubenswrapper[5048]: E1213 07:27:55.269997 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39aadb51-9c02-47dc-a8a7-c325d36473e7" containerName="extract-utilities" Dec 13 07:27:55 crc kubenswrapper[5048]: I1213 07:27:55.270015 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="39aadb51-9c02-47dc-a8a7-c325d36473e7" containerName="extract-utilities" Dec 13 07:27:55 crc kubenswrapper[5048]: I1213 07:27:55.270515 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="39aadb51-9c02-47dc-a8a7-c325d36473e7" containerName="registry-server" Dec 13 07:27:55 crc kubenswrapper[5048]: I1213 07:27:55.270589 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="c17760c9-aea4-4fb2-9dd4-2e4bf7569738" containerName="registry-server" Dec 13 07:27:55 crc kubenswrapper[5048]: I1213 07:27:55.273253 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lthkc" Dec 13 07:27:55 crc kubenswrapper[5048]: I1213 07:27:55.280282 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lthkc"] Dec 13 07:27:55 crc kubenswrapper[5048]: I1213 07:27:55.436050 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5nrgx\" (UniqueName: \"kubernetes.io/projected/a41216ba-d29f-4ca0-8bff-4feddad27d63-kube-api-access-5nrgx\") pod \"redhat-marketplace-lthkc\" (UID: \"a41216ba-d29f-4ca0-8bff-4feddad27d63\") " pod="openshift-marketplace/redhat-marketplace-lthkc" Dec 13 07:27:55 crc kubenswrapper[5048]: I1213 07:27:55.436102 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a41216ba-d29f-4ca0-8bff-4feddad27d63-utilities\") pod \"redhat-marketplace-lthkc\" (UID: \"a41216ba-d29f-4ca0-8bff-4feddad27d63\") " pod="openshift-marketplace/redhat-marketplace-lthkc" Dec 13 07:27:55 crc kubenswrapper[5048]: I1213 07:27:55.436188 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a41216ba-d29f-4ca0-8bff-4feddad27d63-catalog-content\") pod \"redhat-marketplace-lthkc\" (UID: \"a41216ba-d29f-4ca0-8bff-4feddad27d63\") " pod="openshift-marketplace/redhat-marketplace-lthkc" Dec 13 07:27:55 crc kubenswrapper[5048]: I1213 07:27:55.539106 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5nrgx\" (UniqueName: \"kubernetes.io/projected/a41216ba-d29f-4ca0-8bff-4feddad27d63-kube-api-access-5nrgx\") pod \"redhat-marketplace-lthkc\" (UID: \"a41216ba-d29f-4ca0-8bff-4feddad27d63\") " pod="openshift-marketplace/redhat-marketplace-lthkc" Dec 13 07:27:55 crc kubenswrapper[5048]: I1213 07:27:55.539147 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a41216ba-d29f-4ca0-8bff-4feddad27d63-utilities\") pod \"redhat-marketplace-lthkc\" (UID: \"a41216ba-d29f-4ca0-8bff-4feddad27d63\") " pod="openshift-marketplace/redhat-marketplace-lthkc" Dec 13 07:27:55 crc kubenswrapper[5048]: I1213 07:27:55.539224 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a41216ba-d29f-4ca0-8bff-4feddad27d63-catalog-content\") pod \"redhat-marketplace-lthkc\" (UID: \"a41216ba-d29f-4ca0-8bff-4feddad27d63\") " pod="openshift-marketplace/redhat-marketplace-lthkc" Dec 13 07:27:55 crc kubenswrapper[5048]: I1213 07:27:55.539687 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a41216ba-d29f-4ca0-8bff-4feddad27d63-catalog-content\") pod \"redhat-marketplace-lthkc\" (UID: \"a41216ba-d29f-4ca0-8bff-4feddad27d63\") " pod="openshift-marketplace/redhat-marketplace-lthkc" Dec 13 07:27:55 crc kubenswrapper[5048]: I1213 07:27:55.540055 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a41216ba-d29f-4ca0-8bff-4feddad27d63-utilities\") pod \"redhat-marketplace-lthkc\" (UID: \"a41216ba-d29f-4ca0-8bff-4feddad27d63\") " pod="openshift-marketplace/redhat-marketplace-lthkc" Dec 13 07:27:55 crc kubenswrapper[5048]: I1213 07:27:55.560819 5048 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-5nrgx\" (UniqueName: \"kubernetes.io/projected/a41216ba-d29f-4ca0-8bff-4feddad27d63-kube-api-access-5nrgx\") pod \"redhat-marketplace-lthkc\" (UID: \"a41216ba-d29f-4ca0-8bff-4feddad27d63\") " pod="openshift-marketplace/redhat-marketplace-lthkc" Dec 13 07:27:55 crc kubenswrapper[5048]: I1213 07:27:55.600883 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lthkc" Dec 13 07:27:56 crc kubenswrapper[5048]: I1213 07:27:56.119184 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lthkc"] Dec 13 07:27:57 crc kubenswrapper[5048]: I1213 07:27:57.032276 5048 generic.go:334] "Generic (PLEG): container finished" podID="a41216ba-d29f-4ca0-8bff-4feddad27d63" containerID="8f84b789dd2dc3fddaad6e53e54302cc5796ae88479e3fad88d5b1be08c54c04" exitCode=0 Dec 13 07:27:57 crc kubenswrapper[5048]: I1213 07:27:57.032417 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lthkc" event={"ID":"a41216ba-d29f-4ca0-8bff-4feddad27d63","Type":"ContainerDied","Data":"8f84b789dd2dc3fddaad6e53e54302cc5796ae88479e3fad88d5b1be08c54c04"} Dec 13 07:27:57 crc kubenswrapper[5048]: I1213 07:27:57.032845 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lthkc" event={"ID":"a41216ba-d29f-4ca0-8bff-4feddad27d63","Type":"ContainerStarted","Data":"5e6da175adbf0e390a155cfad200d3926681ec3c6fcda2a1b87b6fe2d9e00be8"} Dec 13 07:27:57 crc kubenswrapper[5048]: I1213 07:27:57.034670 5048 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 13 07:27:59 crc kubenswrapper[5048]: I1213 07:27:59.057519 5048 generic.go:334] "Generic (PLEG): container finished" podID="a41216ba-d29f-4ca0-8bff-4feddad27d63" containerID="e94590b5543d8f2d653fd0ea91047270f5175b1f9a2a0b476de7edda6bd4b750" exitCode=0 Dec 13 07:27:59 crc kubenswrapper[5048]: I1213 07:27:59.057593 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lthkc" event={"ID":"a41216ba-d29f-4ca0-8bff-4feddad27d63","Type":"ContainerDied","Data":"e94590b5543d8f2d653fd0ea91047270f5175b1f9a2a0b476de7edda6bd4b750"} Dec 13 07:28:00 crc kubenswrapper[5048]: I1213 07:28:00.072811 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lthkc" event={"ID":"a41216ba-d29f-4ca0-8bff-4feddad27d63","Type":"ContainerStarted","Data":"eb1b9c6f45d7f6bb0e428e38d50e50150a6b92ebb0d328c84d25fac508b5cb7f"} Dec 13 07:28:00 crc kubenswrapper[5048]: I1213 07:28:00.102508 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-lthkc" podStartSLOduration=2.624226856 podStartE2EDuration="5.102472154s" podCreationTimestamp="2025-12-13 07:27:55 +0000 UTC" firstStartedPulling="2025-12-13 07:27:57.034134783 +0000 UTC m=+3510.900729374" lastFinishedPulling="2025-12-13 07:27:59.512380081 +0000 UTC m=+3513.378974672" observedRunningTime="2025-12-13 07:28:00.098236021 +0000 UTC m=+3513.964830622" watchObservedRunningTime="2025-12-13 07:28:00.102472154 +0000 UTC m=+3513.969066795" Dec 13 07:28:05 crc kubenswrapper[5048]: I1213 07:28:05.601190 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-lthkc" Dec 13 07:28:05 crc kubenswrapper[5048]: I1213 07:28:05.601711 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/redhat-marketplace-lthkc" Dec 13 07:28:05 crc kubenswrapper[5048]: I1213 07:28:05.645354 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-lthkc" Dec 13 07:28:06 crc kubenswrapper[5048]: I1213 07:28:06.175084 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-lthkc" Dec 13 07:28:06 crc kubenswrapper[5048]: I1213 07:28:06.222301 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lthkc"] Dec 13 07:28:08 crc kubenswrapper[5048]: I1213 07:28:08.140714 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-lthkc" podUID="a41216ba-d29f-4ca0-8bff-4feddad27d63" containerName="registry-server" containerID="cri-o://eb1b9c6f45d7f6bb0e428e38d50e50150a6b92ebb0d328c84d25fac508b5cb7f" gracePeriod=2 Dec 13 07:28:08 crc kubenswrapper[5048]: I1213 07:28:08.693988 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lthkc" Dec 13 07:28:08 crc kubenswrapper[5048]: I1213 07:28:08.785800 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a41216ba-d29f-4ca0-8bff-4feddad27d63-catalog-content\") pod \"a41216ba-d29f-4ca0-8bff-4feddad27d63\" (UID: \"a41216ba-d29f-4ca0-8bff-4feddad27d63\") " Dec 13 07:28:08 crc kubenswrapper[5048]: I1213 07:28:08.786033 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a41216ba-d29f-4ca0-8bff-4feddad27d63-utilities\") pod \"a41216ba-d29f-4ca0-8bff-4feddad27d63\" (UID: \"a41216ba-d29f-4ca0-8bff-4feddad27d63\") " Dec 13 07:28:08 crc kubenswrapper[5048]: I1213 07:28:08.786294 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5nrgx\" (UniqueName: \"kubernetes.io/projected/a41216ba-d29f-4ca0-8bff-4feddad27d63-kube-api-access-5nrgx\") pod \"a41216ba-d29f-4ca0-8bff-4feddad27d63\" (UID: \"a41216ba-d29f-4ca0-8bff-4feddad27d63\") " Dec 13 07:28:08 crc kubenswrapper[5048]: I1213 07:28:08.787560 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a41216ba-d29f-4ca0-8bff-4feddad27d63-utilities" (OuterVolumeSpecName: "utilities") pod "a41216ba-d29f-4ca0-8bff-4feddad27d63" (UID: "a41216ba-d29f-4ca0-8bff-4feddad27d63"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 07:28:08 crc kubenswrapper[5048]: I1213 07:28:08.793282 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a41216ba-d29f-4ca0-8bff-4feddad27d63-kube-api-access-5nrgx" (OuterVolumeSpecName: "kube-api-access-5nrgx") pod "a41216ba-d29f-4ca0-8bff-4feddad27d63" (UID: "a41216ba-d29f-4ca0-8bff-4feddad27d63"). InnerVolumeSpecName "kube-api-access-5nrgx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 07:28:08 crc kubenswrapper[5048]: I1213 07:28:08.888299 5048 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a41216ba-d29f-4ca0-8bff-4feddad27d63-utilities\") on node \"crc\" DevicePath \"\"" Dec 13 07:28:08 crc kubenswrapper[5048]: I1213 07:28:08.888348 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5nrgx\" (UniqueName: \"kubernetes.io/projected/a41216ba-d29f-4ca0-8bff-4feddad27d63-kube-api-access-5nrgx\") on node \"crc\" DevicePath \"\"" Dec 13 07:28:08 crc kubenswrapper[5048]: I1213 07:28:08.952406 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a41216ba-d29f-4ca0-8bff-4feddad27d63-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a41216ba-d29f-4ca0-8bff-4feddad27d63" (UID: "a41216ba-d29f-4ca0-8bff-4feddad27d63"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 07:28:08 crc kubenswrapper[5048]: I1213 07:28:08.989771 5048 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a41216ba-d29f-4ca0-8bff-4feddad27d63-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 13 07:28:09 crc kubenswrapper[5048]: I1213 07:28:09.150851 5048 generic.go:334] "Generic (PLEG): container finished" podID="a41216ba-d29f-4ca0-8bff-4feddad27d63" containerID="eb1b9c6f45d7f6bb0e428e38d50e50150a6b92ebb0d328c84d25fac508b5cb7f" exitCode=0 Dec 13 07:28:09 crc kubenswrapper[5048]: I1213 07:28:09.150908 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lthkc" event={"ID":"a41216ba-d29f-4ca0-8bff-4feddad27d63","Type":"ContainerDied","Data":"eb1b9c6f45d7f6bb0e428e38d50e50150a6b92ebb0d328c84d25fac508b5cb7f"} Dec 13 07:28:09 crc kubenswrapper[5048]: I1213 07:28:09.150941 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lthkc" event={"ID":"a41216ba-d29f-4ca0-8bff-4feddad27d63","Type":"ContainerDied","Data":"5e6da175adbf0e390a155cfad200d3926681ec3c6fcda2a1b87b6fe2d9e00be8"} Dec 13 07:28:09 crc kubenswrapper[5048]: I1213 07:28:09.150963 5048 scope.go:117] "RemoveContainer" containerID="eb1b9c6f45d7f6bb0e428e38d50e50150a6b92ebb0d328c84d25fac508b5cb7f" Dec 13 07:28:09 crc kubenswrapper[5048]: I1213 07:28:09.151140 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lthkc" Dec 13 07:28:09 crc kubenswrapper[5048]: I1213 07:28:09.181165 5048 scope.go:117] "RemoveContainer" containerID="e94590b5543d8f2d653fd0ea91047270f5175b1f9a2a0b476de7edda6bd4b750" Dec 13 07:28:09 crc kubenswrapper[5048]: I1213 07:28:09.197932 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lthkc"] Dec 13 07:28:09 crc kubenswrapper[5048]: I1213 07:28:09.211701 5048 scope.go:117] "RemoveContainer" containerID="8f84b789dd2dc3fddaad6e53e54302cc5796ae88479e3fad88d5b1be08c54c04" Dec 13 07:28:09 crc kubenswrapper[5048]: I1213 07:28:09.213534 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-lthkc"] Dec 13 07:28:09 crc kubenswrapper[5048]: I1213 07:28:09.259988 5048 scope.go:117] "RemoveContainer" containerID="eb1b9c6f45d7f6bb0e428e38d50e50150a6b92ebb0d328c84d25fac508b5cb7f" Dec 13 07:28:09 crc kubenswrapper[5048]: E1213 07:28:09.260752 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb1b9c6f45d7f6bb0e428e38d50e50150a6b92ebb0d328c84d25fac508b5cb7f\": container with ID starting with eb1b9c6f45d7f6bb0e428e38d50e50150a6b92ebb0d328c84d25fac508b5cb7f not found: ID does not exist" containerID="eb1b9c6f45d7f6bb0e428e38d50e50150a6b92ebb0d328c84d25fac508b5cb7f" Dec 13 07:28:09 crc kubenswrapper[5048]: I1213 07:28:09.260831 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb1b9c6f45d7f6bb0e428e38d50e50150a6b92ebb0d328c84d25fac508b5cb7f"} err="failed to get container status \"eb1b9c6f45d7f6bb0e428e38d50e50150a6b92ebb0d328c84d25fac508b5cb7f\": rpc error: code = NotFound desc = could not find container \"eb1b9c6f45d7f6bb0e428e38d50e50150a6b92ebb0d328c84d25fac508b5cb7f\": container with ID starting with eb1b9c6f45d7f6bb0e428e38d50e50150a6b92ebb0d328c84d25fac508b5cb7f not found: ID does not exist" Dec 13 07:28:09 crc kubenswrapper[5048]: I1213 07:28:09.260860 5048 scope.go:117] "RemoveContainer" containerID="e94590b5543d8f2d653fd0ea91047270f5175b1f9a2a0b476de7edda6bd4b750" Dec 13 07:28:09 crc kubenswrapper[5048]: E1213 07:28:09.261161 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e94590b5543d8f2d653fd0ea91047270f5175b1f9a2a0b476de7edda6bd4b750\": container with ID starting with e94590b5543d8f2d653fd0ea91047270f5175b1f9a2a0b476de7edda6bd4b750 not found: ID does not exist" containerID="e94590b5543d8f2d653fd0ea91047270f5175b1f9a2a0b476de7edda6bd4b750" Dec 13 07:28:09 crc kubenswrapper[5048]: I1213 07:28:09.261193 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e94590b5543d8f2d653fd0ea91047270f5175b1f9a2a0b476de7edda6bd4b750"} err="failed to get container status \"e94590b5543d8f2d653fd0ea91047270f5175b1f9a2a0b476de7edda6bd4b750\": rpc error: code = NotFound desc = could not find container \"e94590b5543d8f2d653fd0ea91047270f5175b1f9a2a0b476de7edda6bd4b750\": container with ID starting with e94590b5543d8f2d653fd0ea91047270f5175b1f9a2a0b476de7edda6bd4b750 not found: ID does not exist" Dec 13 07:28:09 crc kubenswrapper[5048]: I1213 07:28:09.261215 5048 scope.go:117] "RemoveContainer" containerID="8f84b789dd2dc3fddaad6e53e54302cc5796ae88479e3fad88d5b1be08c54c04" Dec 13 07:28:09 crc kubenswrapper[5048]: E1213 07:28:09.261656 5048 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"8f84b789dd2dc3fddaad6e53e54302cc5796ae88479e3fad88d5b1be08c54c04\": container with ID starting with 8f84b789dd2dc3fddaad6e53e54302cc5796ae88479e3fad88d5b1be08c54c04 not found: ID does not exist" containerID="8f84b789dd2dc3fddaad6e53e54302cc5796ae88479e3fad88d5b1be08c54c04" Dec 13 07:28:09 crc kubenswrapper[5048]: I1213 07:28:09.261688 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f84b789dd2dc3fddaad6e53e54302cc5796ae88479e3fad88d5b1be08c54c04"} err="failed to get container status \"8f84b789dd2dc3fddaad6e53e54302cc5796ae88479e3fad88d5b1be08c54c04\": rpc error: code = NotFound desc = could not find container \"8f84b789dd2dc3fddaad6e53e54302cc5796ae88479e3fad88d5b1be08c54c04\": container with ID starting with 8f84b789dd2dc3fddaad6e53e54302cc5796ae88479e3fad88d5b1be08c54c04 not found: ID does not exist" Dec 13 07:28:10 crc kubenswrapper[5048]: I1213 07:28:10.584912 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a41216ba-d29f-4ca0-8bff-4feddad27d63" path="/var/lib/kubelet/pods/a41216ba-d29f-4ca0-8bff-4feddad27d63/volumes" Dec 13 07:28:16 crc kubenswrapper[5048]: I1213 07:28:16.216488 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 13 07:28:16 crc kubenswrapper[5048]: I1213 07:28:16.217107 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 13 07:28:16 crc kubenswrapper[5048]: I1213 07:28:16.217156 5048 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" Dec 13 07:28:16 crc kubenswrapper[5048]: I1213 07:28:16.218004 5048 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"305e2d72c42c0e511700afcae549613ac74af39a3f7fabb14c595f37c385a74b"} pod="openshift-machine-config-operator/machine-config-daemon-j7hns" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 13 07:28:16 crc kubenswrapper[5048]: I1213 07:28:16.218067 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" containerID="cri-o://305e2d72c42c0e511700afcae549613ac74af39a3f7fabb14c595f37c385a74b" gracePeriod=600 Dec 13 07:28:17 crc kubenswrapper[5048]: I1213 07:28:17.226956 5048 generic.go:334] "Generic (PLEG): container finished" podID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerID="305e2d72c42c0e511700afcae549613ac74af39a3f7fabb14c595f37c385a74b" exitCode=0 Dec 13 07:28:17 crc kubenswrapper[5048]: I1213 07:28:17.227065 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" 
event={"ID":"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b","Type":"ContainerDied","Data":"305e2d72c42c0e511700afcae549613ac74af39a3f7fabb14c595f37c385a74b"} Dec 13 07:28:17 crc kubenswrapper[5048]: I1213 07:28:17.227864 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" event={"ID":"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b","Type":"ContainerStarted","Data":"816d7f6445997a7cd4a07b0fe58ad5d757b1c38f7ec8f2c73ef48fbeaa2dc8c7"} Dec 13 07:28:17 crc kubenswrapper[5048]: I1213 07:28:17.227913 5048 scope.go:117] "RemoveContainer" containerID="fbbb55828e04cb7da8203e5851a4c6374f3307372c72b9f414bc411afda46023" Dec 13 07:29:26 crc kubenswrapper[5048]: I1213 07:29:26.946185 5048 generic.go:334] "Generic (PLEG): container finished" podID="b1419e46-ce93-452b-9d88-b9a50e9dbfe6" containerID="49b4813fe256ca1a5852891e6d463c68f80fa4d2692fb2678370533804292f05" exitCode=0 Dec 13 07:29:26 crc kubenswrapper[5048]: I1213 07:29:26.946265 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"b1419e46-ce93-452b-9d88-b9a50e9dbfe6","Type":"ContainerDied","Data":"49b4813fe256ca1a5852891e6d463c68f80fa4d2692fb2678370533804292f05"} Dec 13 07:29:28 crc kubenswrapper[5048]: I1213 07:29:28.350370 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Dec 13 07:29:28 crc kubenswrapper[5048]: I1213 07:29:28.451681 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-ca-certs\") pod \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\" (UID: \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\") " Dec 13 07:29:28 crc kubenswrapper[5048]: I1213 07:29:28.451804 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-test-operator-ephemeral-temporary\") pod \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\" (UID: \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\") " Dec 13 07:29:28 crc kubenswrapper[5048]: I1213 07:29:28.451842 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-openstack-config-secret\") pod \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\" (UID: \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\") " Dec 13 07:29:28 crc kubenswrapper[5048]: I1213 07:29:28.451869 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xs2d8\" (UniqueName: \"kubernetes.io/projected/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-kube-api-access-xs2d8\") pod \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\" (UID: \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\") " Dec 13 07:29:28 crc kubenswrapper[5048]: I1213 07:29:28.451901 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-config-data\") pod \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\" (UID: \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\") " Dec 13 07:29:28 crc kubenswrapper[5048]: I1213 07:29:28.452019 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\" (UID: \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\") " 
Dec 13 07:29:28 crc kubenswrapper[5048]: I1213 07:29:28.452049 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-ssh-key\") pod \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\" (UID: \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\") "
Dec 13 07:29:28 crc kubenswrapper[5048]: I1213 07:29:28.452068 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-openstack-config\") pod \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\" (UID: \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\") "
Dec 13 07:29:28 crc kubenswrapper[5048]: I1213 07:29:28.452125 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-test-operator-ephemeral-workdir\") pod \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\" (UID: \"b1419e46-ce93-452b-9d88-b9a50e9dbfe6\") "
Dec 13 07:29:28 crc kubenswrapper[5048]: I1213 07:29:28.452188 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "b1419e46-ce93-452b-9d88-b9a50e9dbfe6" (UID: "b1419e46-ce93-452b-9d88-b9a50e9dbfe6"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 13 07:29:28 crc kubenswrapper[5048]: I1213 07:29:28.453048 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-config-data" (OuterVolumeSpecName: "config-data") pod "b1419e46-ce93-452b-9d88-b9a50e9dbfe6" (UID: "b1419e46-ce93-452b-9d88-b9a50e9dbfe6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 13 07:29:28 crc kubenswrapper[5048]: I1213 07:29:28.453346 5048 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\""
Dec 13 07:29:28 crc kubenswrapper[5048]: I1213 07:29:28.453402 5048 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-config-data\") on node \"crc\" DevicePath \"\""
Dec 13 07:29:28 crc kubenswrapper[5048]: I1213 07:29:28.461998 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "b1419e46-ce93-452b-9d88-b9a50e9dbfe6" (UID: "b1419e46-ce93-452b-9d88-b9a50e9dbfe6"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 13 07:29:28 crc kubenswrapper[5048]: I1213 07:29:28.465644 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-kube-api-access-xs2d8" (OuterVolumeSpecName: "kube-api-access-xs2d8") pod "b1419e46-ce93-452b-9d88-b9a50e9dbfe6" (UID: "b1419e46-ce93-452b-9d88-b9a50e9dbfe6"). InnerVolumeSpecName "kube-api-access-xs2d8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 07:29:28 crc kubenswrapper[5048]: I1213 07:29:28.470059 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "test-operator-logs") pod "b1419e46-ce93-452b-9d88-b9a50e9dbfe6" (UID: "b1419e46-ce93-452b-9d88-b9a50e9dbfe6"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Dec 13 07:29:28 crc kubenswrapper[5048]: I1213 07:29:28.486866 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "b1419e46-ce93-452b-9d88-b9a50e9dbfe6" (UID: "b1419e46-ce93-452b-9d88-b9a50e9dbfe6"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 07:29:28 crc kubenswrapper[5048]: I1213 07:29:28.507717 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "b1419e46-ce93-452b-9d88-b9a50e9dbfe6" (UID: "b1419e46-ce93-452b-9d88-b9a50e9dbfe6"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 07:29:28 crc kubenswrapper[5048]: I1213 07:29:28.520923 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "b1419e46-ce93-452b-9d88-b9a50e9dbfe6" (UID: "b1419e46-ce93-452b-9d88-b9a50e9dbfe6"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 13 07:29:28 crc kubenswrapper[5048]: I1213 07:29:28.521170 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "b1419e46-ce93-452b-9d88-b9a50e9dbfe6" (UID: "b1419e46-ce93-452b-9d88-b9a50e9dbfe6"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 13 07:29:28 crc kubenswrapper[5048]: I1213 07:29:28.554938 5048 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\""
Dec 13 07:29:28 crc kubenswrapper[5048]: I1213 07:29:28.554965 5048 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-ca-certs\") on node \"crc\" DevicePath \"\""
Dec 13 07:29:28 crc kubenswrapper[5048]: I1213 07:29:28.554976 5048 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-openstack-config-secret\") on node \"crc\" DevicePath \"\""
Dec 13 07:29:28 crc kubenswrapper[5048]: I1213 07:29:28.554987 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xs2d8\" (UniqueName: \"kubernetes.io/projected/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-kube-api-access-xs2d8\") on node \"crc\" DevicePath \"\""
Dec 13 07:29:28 crc kubenswrapper[5048]: I1213 07:29:28.555016 5048 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" "
Dec 13 07:29:28 crc kubenswrapper[5048]: I1213 07:29:28.555026 5048 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-ssh-key\") on node \"crc\" DevicePath \"\""
Dec 13 07:29:28 crc kubenswrapper[5048]: I1213 07:29:28.555034 5048 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/b1419e46-ce93-452b-9d88-b9a50e9dbfe6-openstack-config\") on node \"crc\" DevicePath \"\""
Dec 13 07:29:28 crc kubenswrapper[5048]: I1213 07:29:28.574099 5048 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc"
Dec 13 07:29:28 crc kubenswrapper[5048]: I1213 07:29:28.656088 5048 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\""
Dec 13 07:29:28 crc kubenswrapper[5048]: I1213 07:29:28.969173 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"b1419e46-ce93-452b-9d88-b9a50e9dbfe6","Type":"ContainerDied","Data":"5af9a89c2dbd5f3b0ae4587b27ac05b508f29e462e070d911e8e97bbda108d8d"}
Dec 13 07:29:28 crc kubenswrapper[5048]: I1213 07:29:28.969219 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5af9a89c2dbd5f3b0ae4587b27ac05b508f29e462e070d911e8e97bbda108d8d"
Dec 13 07:29:28 crc kubenswrapper[5048]: I1213 07:29:28.969230 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Dec 13 07:29:40 crc kubenswrapper[5048]: I1213 07:29:40.805832 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Dec 13 07:29:40 crc kubenswrapper[5048]: E1213 07:29:40.807204 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a41216ba-d29f-4ca0-8bff-4feddad27d63" containerName="extract-utilities"
Dec 13 07:29:40 crc kubenswrapper[5048]: I1213 07:29:40.807265 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="a41216ba-d29f-4ca0-8bff-4feddad27d63" containerName="extract-utilities"
Dec 13 07:29:40 crc kubenswrapper[5048]: E1213 07:29:40.807320 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1419e46-ce93-452b-9d88-b9a50e9dbfe6" containerName="tempest-tests-tempest-tests-runner"
Dec 13 07:29:40 crc kubenswrapper[5048]: I1213 07:29:40.807335 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1419e46-ce93-452b-9d88-b9a50e9dbfe6" containerName="tempest-tests-tempest-tests-runner"
Dec 13 07:29:40 crc kubenswrapper[5048]: E1213 07:29:40.807360 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a41216ba-d29f-4ca0-8bff-4feddad27d63" containerName="registry-server"
Dec 13 07:29:40 crc kubenswrapper[5048]: I1213 07:29:40.807374 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="a41216ba-d29f-4ca0-8bff-4feddad27d63" containerName="registry-server"
Dec 13 07:29:40 crc kubenswrapper[5048]: E1213 07:29:40.807414 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a41216ba-d29f-4ca0-8bff-4feddad27d63" containerName="extract-content"
Dec 13 07:29:40 crc kubenswrapper[5048]: I1213 07:29:40.807428 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="a41216ba-d29f-4ca0-8bff-4feddad27d63" containerName="extract-content"
Dec 13 07:29:40 crc kubenswrapper[5048]: I1213 07:29:40.807785 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="a41216ba-d29f-4ca0-8bff-4feddad27d63" containerName="registry-server"
Dec 13 07:29:40 crc kubenswrapper[5048]: I1213 07:29:40.807814 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1419e46-ce93-452b-9d88-b9a50e9dbfe6" containerName="tempest-tests-tempest-tests-runner"
Dec 13 07:29:40 crc kubenswrapper[5048]: I1213 07:29:40.809014 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Dec 13 07:29:40 crc kubenswrapper[5048]: I1213 07:29:40.813055 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-g44hk"
Dec 13 07:29:40 crc kubenswrapper[5048]: I1213 07:29:40.823262 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Dec 13 07:29:40 crc kubenswrapper[5048]: I1213 07:29:40.920878 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"036a76e2-0363-4fe5-98fc-283eea6536e6\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Dec 13 07:29:40 crc kubenswrapper[5048]: I1213 07:29:40.920933 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4fxb5\" (UniqueName: \"kubernetes.io/projected/036a76e2-0363-4fe5-98fc-283eea6536e6-kube-api-access-4fxb5\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"036a76e2-0363-4fe5-98fc-283eea6536e6\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Dec 13 07:29:41 crc kubenswrapper[5048]: I1213 07:29:41.023189 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"036a76e2-0363-4fe5-98fc-283eea6536e6\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Dec 13 07:29:41 crc kubenswrapper[5048]: I1213 07:29:41.023246 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4fxb5\" (UniqueName: \"kubernetes.io/projected/036a76e2-0363-4fe5-98fc-283eea6536e6-kube-api-access-4fxb5\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"036a76e2-0363-4fe5-98fc-283eea6536e6\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Dec 13 07:29:41 crc kubenswrapper[5048]: I1213 07:29:41.023962 5048 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"036a76e2-0363-4fe5-98fc-283eea6536e6\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Dec 13 07:29:41 crc kubenswrapper[5048]: I1213 07:29:41.042508 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4fxb5\" (UniqueName: \"kubernetes.io/projected/036a76e2-0363-4fe5-98fc-283eea6536e6-kube-api-access-4fxb5\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"036a76e2-0363-4fe5-98fc-283eea6536e6\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Dec 13 07:29:41 crc kubenswrapper[5048]: I1213 07:29:41.056328 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"036a76e2-0363-4fe5-98fc-283eea6536e6\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Dec 13 07:29:41 crc kubenswrapper[5048]: I1213 07:29:41.137557 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Dec 13 07:29:41 crc kubenswrapper[5048]: I1213 07:29:41.618953 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Dec 13 07:29:42 crc kubenswrapper[5048]: I1213 07:29:42.109823 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"036a76e2-0363-4fe5-98fc-283eea6536e6","Type":"ContainerStarted","Data":"6f4940f5dc5722ea70c2db0e0eff1b7eb670ccafb6d917cb9038050032419dec"}
Dec 13 07:29:44 crc kubenswrapper[5048]: I1213 07:29:44.135091 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"036a76e2-0363-4fe5-98fc-283eea6536e6","Type":"ContainerStarted","Data":"1446dd282e432dd7c56023e06cde846eb2d5fce2946c042a34d4e2d39c78e6c4"}
Dec 13 07:29:44 crc kubenswrapper[5048]: I1213 07:29:44.167132 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=3.124535647 podStartE2EDuration="4.16710165s" podCreationTimestamp="2025-12-13 07:29:40 +0000 UTC" firstStartedPulling="2025-12-13 07:29:41.630306808 +0000 UTC m=+3615.496901389" lastFinishedPulling="2025-12-13 07:29:42.672872781 +0000 UTC m=+3616.539467392" observedRunningTime="2025-12-13 07:29:44.152726862 +0000 UTC m=+3618.019321483" watchObservedRunningTime="2025-12-13 07:29:44.16710165 +0000 UTC m=+3618.033696271"
Dec 13 07:30:00 crc kubenswrapper[5048]: I1213 07:30:00.162234 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29426850-c22c7"]
Dec 13 07:30:00 crc kubenswrapper[5048]: I1213 07:30:00.164146 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29426850-c22c7"
Dec 13 07:30:00 crc kubenswrapper[5048]: I1213 07:30:00.168214 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Dec 13 07:30:00 crc kubenswrapper[5048]: I1213 07:30:00.168331 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Dec 13 07:30:00 crc kubenswrapper[5048]: I1213 07:30:00.182010 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29426850-c22c7"]
Dec 13 07:30:00 crc kubenswrapper[5048]: I1213 07:30:00.232372 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7d133520-c84f-4731-96bb-85a851337e85-config-volume\") pod \"collect-profiles-29426850-c22c7\" (UID: \"7d133520-c84f-4731-96bb-85a851337e85\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426850-c22c7"
Dec 13 07:30:00 crc kubenswrapper[5048]: I1213 07:30:00.232416 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xsbrl\" (UniqueName: \"kubernetes.io/projected/7d133520-c84f-4731-96bb-85a851337e85-kube-api-access-xsbrl\") pod \"collect-profiles-29426850-c22c7\" (UID: \"7d133520-c84f-4731-96bb-85a851337e85\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426850-c22c7"
Dec 13 07:30:00 crc kubenswrapper[5048]: I1213 07:30:00.232652 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7d133520-c84f-4731-96bb-85a851337e85-secret-volume\") pod \"collect-profiles-29426850-c22c7\" (UID: \"7d133520-c84f-4731-96bb-85a851337e85\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426850-c22c7"
Dec 13 07:30:00 crc kubenswrapper[5048]: I1213 07:30:00.334505 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7d133520-c84f-4731-96bb-85a851337e85-config-volume\") pod \"collect-profiles-29426850-c22c7\" (UID: \"7d133520-c84f-4731-96bb-85a851337e85\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426850-c22c7"
Dec 13 07:30:00 crc kubenswrapper[5048]: I1213 07:30:00.334600 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xsbrl\" (UniqueName: \"kubernetes.io/projected/7d133520-c84f-4731-96bb-85a851337e85-kube-api-access-xsbrl\") pod \"collect-profiles-29426850-c22c7\" (UID: \"7d133520-c84f-4731-96bb-85a851337e85\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426850-c22c7"
Dec 13 07:30:00 crc kubenswrapper[5048]: I1213 07:30:00.334930 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7d133520-c84f-4731-96bb-85a851337e85-secret-volume\") pod \"collect-profiles-29426850-c22c7\" (UID: \"7d133520-c84f-4731-96bb-85a851337e85\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426850-c22c7"
Dec 13 07:30:00 crc kubenswrapper[5048]: I1213 07:30:00.336524 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7d133520-c84f-4731-96bb-85a851337e85-config-volume\") pod
\"collect-profiles-29426850-c22c7\" (UID: \"7d133520-c84f-4731-96bb-85a851337e85\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426850-c22c7" Dec 13 07:30:00 crc kubenswrapper[5048]: I1213 07:30:00.345231 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7d133520-c84f-4731-96bb-85a851337e85-secret-volume\") pod \"collect-profiles-29426850-c22c7\" (UID: \"7d133520-c84f-4731-96bb-85a851337e85\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426850-c22c7" Dec 13 07:30:00 crc kubenswrapper[5048]: I1213 07:30:00.360679 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xsbrl\" (UniqueName: \"kubernetes.io/projected/7d133520-c84f-4731-96bb-85a851337e85-kube-api-access-xsbrl\") pod \"collect-profiles-29426850-c22c7\" (UID: \"7d133520-c84f-4731-96bb-85a851337e85\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426850-c22c7" Dec 13 07:30:00 crc kubenswrapper[5048]: I1213 07:30:00.487997 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29426850-c22c7" Dec 13 07:30:01 crc kubenswrapper[5048]: I1213 07:30:01.000364 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29426850-c22c7"] Dec 13 07:30:01 crc kubenswrapper[5048]: W1213 07:30:01.004829 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7d133520_c84f_4731_96bb_85a851337e85.slice/crio-564bad691092a95ebfc71a9d28637aa2dcdd15dda3451ee466a6572c20b491f7 WatchSource:0}: Error finding container 564bad691092a95ebfc71a9d28637aa2dcdd15dda3451ee466a6572c20b491f7: Status 404 returned error can't find the container with id 564bad691092a95ebfc71a9d28637aa2dcdd15dda3451ee466a6572c20b491f7 Dec 13 07:30:01 crc kubenswrapper[5048]: I1213 07:30:01.314165 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29426850-c22c7" event={"ID":"7d133520-c84f-4731-96bb-85a851337e85","Type":"ContainerStarted","Data":"07a1d1a1e1b0c75109ff83db3d8dc4209994d7fd6e0977eb1699ad66eff562a2"} Dec 13 07:30:01 crc kubenswrapper[5048]: I1213 07:30:01.314534 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29426850-c22c7" event={"ID":"7d133520-c84f-4731-96bb-85a851337e85","Type":"ContainerStarted","Data":"564bad691092a95ebfc71a9d28637aa2dcdd15dda3451ee466a6572c20b491f7"} Dec 13 07:30:01 crc kubenswrapper[5048]: I1213 07:30:01.345029 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29426850-c22c7" podStartSLOduration=1.344998428 podStartE2EDuration="1.344998428s" podCreationTimestamp="2025-12-13 07:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 07:30:01.333361614 +0000 UTC m=+3635.199956195" watchObservedRunningTime="2025-12-13 07:30:01.344998428 +0000 UTC m=+3635.211593049" Dec 13 07:30:02 crc kubenswrapper[5048]: I1213 07:30:02.328102 5048 generic.go:334] "Generic (PLEG): container finished" podID="7d133520-c84f-4731-96bb-85a851337e85" containerID="07a1d1a1e1b0c75109ff83db3d8dc4209994d7fd6e0977eb1699ad66eff562a2" exitCode=0 Dec 13 07:30:02 crc kubenswrapper[5048]: I1213 07:30:02.328174 
5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29426850-c22c7" event={"ID":"7d133520-c84f-4731-96bb-85a851337e85","Type":"ContainerDied","Data":"07a1d1a1e1b0c75109ff83db3d8dc4209994d7fd6e0977eb1699ad66eff562a2"} Dec 13 07:30:03 crc kubenswrapper[5048]: I1213 07:30:03.696902 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29426850-c22c7" Dec 13 07:30:03 crc kubenswrapper[5048]: I1213 07:30:03.705457 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7d133520-c84f-4731-96bb-85a851337e85-secret-volume\") pod \"7d133520-c84f-4731-96bb-85a851337e85\" (UID: \"7d133520-c84f-4731-96bb-85a851337e85\") " Dec 13 07:30:03 crc kubenswrapper[5048]: I1213 07:30:03.705734 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7d133520-c84f-4731-96bb-85a851337e85-config-volume\") pod \"7d133520-c84f-4731-96bb-85a851337e85\" (UID: \"7d133520-c84f-4731-96bb-85a851337e85\") " Dec 13 07:30:03 crc kubenswrapper[5048]: I1213 07:30:03.705967 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xsbrl\" (UniqueName: \"kubernetes.io/projected/7d133520-c84f-4731-96bb-85a851337e85-kube-api-access-xsbrl\") pod \"7d133520-c84f-4731-96bb-85a851337e85\" (UID: \"7d133520-c84f-4731-96bb-85a851337e85\") " Dec 13 07:30:03 crc kubenswrapper[5048]: I1213 07:30:03.706558 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7d133520-c84f-4731-96bb-85a851337e85-config-volume" (OuterVolumeSpecName: "config-volume") pod "7d133520-c84f-4731-96bb-85a851337e85" (UID: "7d133520-c84f-4731-96bb-85a851337e85"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 07:30:03 crc kubenswrapper[5048]: I1213 07:30:03.711920 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7d133520-c84f-4731-96bb-85a851337e85-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "7d133520-c84f-4731-96bb-85a851337e85" (UID: "7d133520-c84f-4731-96bb-85a851337e85"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:30:03 crc kubenswrapper[5048]: I1213 07:30:03.712606 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7d133520-c84f-4731-96bb-85a851337e85-kube-api-access-xsbrl" (OuterVolumeSpecName: "kube-api-access-xsbrl") pod "7d133520-c84f-4731-96bb-85a851337e85" (UID: "7d133520-c84f-4731-96bb-85a851337e85"). InnerVolumeSpecName "kube-api-access-xsbrl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 07:30:03 crc kubenswrapper[5048]: I1213 07:30:03.808419 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xsbrl\" (UniqueName: \"kubernetes.io/projected/7d133520-c84f-4731-96bb-85a851337e85-kube-api-access-xsbrl\") on node \"crc\" DevicePath \"\"" Dec 13 07:30:03 crc kubenswrapper[5048]: I1213 07:30:03.808465 5048 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7d133520-c84f-4731-96bb-85a851337e85-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 13 07:30:03 crc kubenswrapper[5048]: I1213 07:30:03.808474 5048 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7d133520-c84f-4731-96bb-85a851337e85-config-volume\") on node \"crc\" DevicePath \"\"" Dec 13 07:30:04 crc kubenswrapper[5048]: I1213 07:30:04.348039 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29426850-c22c7" event={"ID":"7d133520-c84f-4731-96bb-85a851337e85","Type":"ContainerDied","Data":"564bad691092a95ebfc71a9d28637aa2dcdd15dda3451ee466a6572c20b491f7"} Dec 13 07:30:04 crc kubenswrapper[5048]: I1213 07:30:04.348321 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="564bad691092a95ebfc71a9d28637aa2dcdd15dda3451ee466a6572c20b491f7" Dec 13 07:30:04 crc kubenswrapper[5048]: I1213 07:30:04.348138 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29426850-c22c7" Dec 13 07:30:04 crc kubenswrapper[5048]: I1213 07:30:04.403527 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29426805-cqfrx"] Dec 13 07:30:04 crc kubenswrapper[5048]: I1213 07:30:04.410825 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29426805-cqfrx"] Dec 13 07:30:04 crc kubenswrapper[5048]: I1213 07:30:04.581969 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e6496a13-bbc2-4049-b82b-3a717d832fc7" path="/var/lib/kubelet/pods/e6496a13-bbc2-4049-b82b-3a717d832fc7/volumes" Dec 13 07:30:07 crc kubenswrapper[5048]: I1213 07:30:07.993861 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-n9sbw/must-gather-whcvp"] Dec 13 07:30:07 crc kubenswrapper[5048]: E1213 07:30:07.994570 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d133520-c84f-4731-96bb-85a851337e85" containerName="collect-profiles" Dec 13 07:30:07 crc kubenswrapper[5048]: I1213 07:30:07.994583 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d133520-c84f-4731-96bb-85a851337e85" containerName="collect-profiles" Dec 13 07:30:07 crc kubenswrapper[5048]: I1213 07:30:07.994840 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d133520-c84f-4731-96bb-85a851337e85" containerName="collect-profiles" Dec 13 07:30:07 crc kubenswrapper[5048]: I1213 07:30:07.995893 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-n9sbw/must-gather-whcvp" Dec 13 07:30:07 crc kubenswrapper[5048]: I1213 07:30:07.997615 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-n9sbw"/"default-dockercfg-cwddh" Dec 13 07:30:07 crc kubenswrapper[5048]: I1213 07:30:07.998598 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-n9sbw"/"kube-root-ca.crt" Dec 13 07:30:07 crc kubenswrapper[5048]: I1213 07:30:07.999766 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-n9sbw"/"openshift-service-ca.crt" Dec 13 07:30:08 crc kubenswrapper[5048]: I1213 07:30:08.002354 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-n9sbw/must-gather-whcvp"] Dec 13 07:30:08 crc kubenswrapper[5048]: I1213 07:30:08.192123 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-95tnp\" (UniqueName: \"kubernetes.io/projected/59caec1f-b50d-4c55-9b3f-9e646f0642ec-kube-api-access-95tnp\") pod \"must-gather-whcvp\" (UID: \"59caec1f-b50d-4c55-9b3f-9e646f0642ec\") " pod="openshift-must-gather-n9sbw/must-gather-whcvp" Dec 13 07:30:08 crc kubenswrapper[5048]: I1213 07:30:08.192222 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/59caec1f-b50d-4c55-9b3f-9e646f0642ec-must-gather-output\") pod \"must-gather-whcvp\" (UID: \"59caec1f-b50d-4c55-9b3f-9e646f0642ec\") " pod="openshift-must-gather-n9sbw/must-gather-whcvp" Dec 13 07:30:08 crc kubenswrapper[5048]: I1213 07:30:08.294355 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-95tnp\" (UniqueName: \"kubernetes.io/projected/59caec1f-b50d-4c55-9b3f-9e646f0642ec-kube-api-access-95tnp\") pod \"must-gather-whcvp\" (UID: \"59caec1f-b50d-4c55-9b3f-9e646f0642ec\") " pod="openshift-must-gather-n9sbw/must-gather-whcvp" Dec 13 07:30:08 crc kubenswrapper[5048]: I1213 07:30:08.294445 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/59caec1f-b50d-4c55-9b3f-9e646f0642ec-must-gather-output\") pod \"must-gather-whcvp\" (UID: \"59caec1f-b50d-4c55-9b3f-9e646f0642ec\") " pod="openshift-must-gather-n9sbw/must-gather-whcvp" Dec 13 07:30:08 crc kubenswrapper[5048]: I1213 07:30:08.295461 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/59caec1f-b50d-4c55-9b3f-9e646f0642ec-must-gather-output\") pod \"must-gather-whcvp\" (UID: \"59caec1f-b50d-4c55-9b3f-9e646f0642ec\") " pod="openshift-must-gather-n9sbw/must-gather-whcvp" Dec 13 07:30:08 crc kubenswrapper[5048]: I1213 07:30:08.313526 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-95tnp\" (UniqueName: \"kubernetes.io/projected/59caec1f-b50d-4c55-9b3f-9e646f0642ec-kube-api-access-95tnp\") pod \"must-gather-whcvp\" (UID: \"59caec1f-b50d-4c55-9b3f-9e646f0642ec\") " pod="openshift-must-gather-n9sbw/must-gather-whcvp" Dec 13 07:30:08 crc kubenswrapper[5048]: I1213 07:30:08.318259 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-n9sbw/must-gather-whcvp" Dec 13 07:30:08 crc kubenswrapper[5048]: I1213 07:30:08.838649 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-n9sbw/must-gather-whcvp"] Dec 13 07:30:09 crc kubenswrapper[5048]: I1213 07:30:09.398107 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-n9sbw/must-gather-whcvp" event={"ID":"59caec1f-b50d-4c55-9b3f-9e646f0642ec","Type":"ContainerStarted","Data":"bd2a624eb42bc75f5e61484273a815218e604182247b5bf0a8033c4d0aa926b7"} Dec 13 07:30:11 crc kubenswrapper[5048]: I1213 07:30:11.454835 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-n5jqk"] Dec 13 07:30:11 crc kubenswrapper[5048]: I1213 07:30:11.464966 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-n5jqk"] Dec 13 07:30:11 crc kubenswrapper[5048]: I1213 07:30:11.465121 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-n5jqk" Dec 13 07:30:11 crc kubenswrapper[5048]: I1213 07:30:11.565943 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f615071-be66-410f-a5fd-41422b9932c9-catalog-content\") pod \"redhat-operators-n5jqk\" (UID: \"6f615071-be66-410f-a5fd-41422b9932c9\") " pod="openshift-marketplace/redhat-operators-n5jqk" Dec 13 07:30:11 crc kubenswrapper[5048]: I1213 07:30:11.566035 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9hn5h\" (UniqueName: \"kubernetes.io/projected/6f615071-be66-410f-a5fd-41422b9932c9-kube-api-access-9hn5h\") pod \"redhat-operators-n5jqk\" (UID: \"6f615071-be66-410f-a5fd-41422b9932c9\") " pod="openshift-marketplace/redhat-operators-n5jqk" Dec 13 07:30:11 crc kubenswrapper[5048]: I1213 07:30:11.566113 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f615071-be66-410f-a5fd-41422b9932c9-utilities\") pod \"redhat-operators-n5jqk\" (UID: \"6f615071-be66-410f-a5fd-41422b9932c9\") " pod="openshift-marketplace/redhat-operators-n5jqk" Dec 13 07:30:11 crc kubenswrapper[5048]: I1213 07:30:11.667902 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f615071-be66-410f-a5fd-41422b9932c9-catalog-content\") pod \"redhat-operators-n5jqk\" (UID: \"6f615071-be66-410f-a5fd-41422b9932c9\") " pod="openshift-marketplace/redhat-operators-n5jqk" Dec 13 07:30:11 crc kubenswrapper[5048]: I1213 07:30:11.667976 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9hn5h\" (UniqueName: \"kubernetes.io/projected/6f615071-be66-410f-a5fd-41422b9932c9-kube-api-access-9hn5h\") pod \"redhat-operators-n5jqk\" (UID: \"6f615071-be66-410f-a5fd-41422b9932c9\") " pod="openshift-marketplace/redhat-operators-n5jqk" Dec 13 07:30:11 crc kubenswrapper[5048]: I1213 07:30:11.668045 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f615071-be66-410f-a5fd-41422b9932c9-utilities\") pod \"redhat-operators-n5jqk\" (UID: \"6f615071-be66-410f-a5fd-41422b9932c9\") " pod="openshift-marketplace/redhat-operators-n5jqk" Dec 13 07:30:11 crc kubenswrapper[5048]: I1213 
07:30:11.668698 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f615071-be66-410f-a5fd-41422b9932c9-utilities\") pod \"redhat-operators-n5jqk\" (UID: \"6f615071-be66-410f-a5fd-41422b9932c9\") " pod="openshift-marketplace/redhat-operators-n5jqk" Dec 13 07:30:11 crc kubenswrapper[5048]: I1213 07:30:11.668749 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f615071-be66-410f-a5fd-41422b9932c9-catalog-content\") pod \"redhat-operators-n5jqk\" (UID: \"6f615071-be66-410f-a5fd-41422b9932c9\") " pod="openshift-marketplace/redhat-operators-n5jqk" Dec 13 07:30:11 crc kubenswrapper[5048]: I1213 07:30:11.690350 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9hn5h\" (UniqueName: \"kubernetes.io/projected/6f615071-be66-410f-a5fd-41422b9932c9-kube-api-access-9hn5h\") pod \"redhat-operators-n5jqk\" (UID: \"6f615071-be66-410f-a5fd-41422b9932c9\") " pod="openshift-marketplace/redhat-operators-n5jqk" Dec 13 07:30:11 crc kubenswrapper[5048]: I1213 07:30:11.803659 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-n5jqk" Dec 13 07:30:16 crc kubenswrapper[5048]: I1213 07:30:16.216092 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 13 07:30:16 crc kubenswrapper[5048]: I1213 07:30:16.216750 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 13 07:30:16 crc kubenswrapper[5048]: I1213 07:30:16.379362 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-n5jqk"] Dec 13 07:30:16 crc kubenswrapper[5048]: W1213 07:30:16.386736 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6f615071_be66_410f_a5fd_41422b9932c9.slice/crio-5b789ebafaf50b34f619d259aad8dfeb3c039e2a3b677bcb809ca57f3f164a24 WatchSource:0}: Error finding container 5b789ebafaf50b34f619d259aad8dfeb3c039e2a3b677bcb809ca57f3f164a24: Status 404 returned error can't find the container with id 5b789ebafaf50b34f619d259aad8dfeb3c039e2a3b677bcb809ca57f3f164a24 Dec 13 07:30:16 crc kubenswrapper[5048]: I1213 07:30:16.483002 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-n9sbw/must-gather-whcvp" event={"ID":"59caec1f-b50d-4c55-9b3f-9e646f0642ec","Type":"ContainerStarted","Data":"86e373bd76a759258716285640078fca42454a17b2ba94681932c0e5e163b381"} Dec 13 07:30:16 crc kubenswrapper[5048]: I1213 07:30:16.483837 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-n5jqk" event={"ID":"6f615071-be66-410f-a5fd-41422b9932c9","Type":"ContainerStarted","Data":"5b789ebafaf50b34f619d259aad8dfeb3c039e2a3b677bcb809ca57f3f164a24"} Dec 13 07:30:17 crc kubenswrapper[5048]: I1213 07:30:17.500337 5048 generic.go:334] "Generic (PLEG): container finished" 
podID="6f615071-be66-410f-a5fd-41422b9932c9" containerID="47818e8fc67f6258191dca735c5a26b1f14a3e7e973843d777740beb9991ed87" exitCode=0 Dec 13 07:30:17 crc kubenswrapper[5048]: I1213 07:30:17.500468 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-n5jqk" event={"ID":"6f615071-be66-410f-a5fd-41422b9932c9","Type":"ContainerDied","Data":"47818e8fc67f6258191dca735c5a26b1f14a3e7e973843d777740beb9991ed87"} Dec 13 07:30:17 crc kubenswrapper[5048]: I1213 07:30:17.504502 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-n9sbw/must-gather-whcvp" event={"ID":"59caec1f-b50d-4c55-9b3f-9e646f0642ec","Type":"ContainerStarted","Data":"a76f581ee0ca568b530aea863442d1d4275aa5b57a9ad92dacf5d009cebaf42d"} Dec 13 07:30:17 crc kubenswrapper[5048]: I1213 07:30:17.558779 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-n9sbw/must-gather-whcvp" podStartSLOduration=3.473133829 podStartE2EDuration="10.558757383s" podCreationTimestamp="2025-12-13 07:30:07 +0000 UTC" firstStartedPulling="2025-12-13 07:30:08.917512259 +0000 UTC m=+3642.784106840" lastFinishedPulling="2025-12-13 07:30:16.003135813 +0000 UTC m=+3649.869730394" observedRunningTime="2025-12-13 07:30:17.540295434 +0000 UTC m=+3651.406890015" watchObservedRunningTime="2025-12-13 07:30:17.558757383 +0000 UTC m=+3651.425351984" Dec 13 07:30:19 crc kubenswrapper[5048]: I1213 07:30:19.531921 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-n5jqk" event={"ID":"6f615071-be66-410f-a5fd-41422b9932c9","Type":"ContainerStarted","Data":"f308a9cc3674a35be7ac9a711d76b7a98c6425f1d1f6a2e7357a12ddd26ed707"} Dec 13 07:30:20 crc kubenswrapper[5048]: I1213 07:30:20.347558 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-n9sbw/crc-debug-sc6dr"] Dec 13 07:30:20 crc kubenswrapper[5048]: I1213 07:30:20.348701 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-n9sbw/crc-debug-sc6dr" Dec 13 07:30:20 crc kubenswrapper[5048]: I1213 07:30:20.478813 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/fcb618f5-8797-45ae-bdcc-04027097b73d-host\") pod \"crc-debug-sc6dr\" (UID: \"fcb618f5-8797-45ae-bdcc-04027097b73d\") " pod="openshift-must-gather-n9sbw/crc-debug-sc6dr" Dec 13 07:30:20 crc kubenswrapper[5048]: I1213 07:30:20.478897 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wgbbw\" (UniqueName: \"kubernetes.io/projected/fcb618f5-8797-45ae-bdcc-04027097b73d-kube-api-access-wgbbw\") pod \"crc-debug-sc6dr\" (UID: \"fcb618f5-8797-45ae-bdcc-04027097b73d\") " pod="openshift-must-gather-n9sbw/crc-debug-sc6dr" Dec 13 07:30:20 crc kubenswrapper[5048]: I1213 07:30:20.542425 5048 generic.go:334] "Generic (PLEG): container finished" podID="6f615071-be66-410f-a5fd-41422b9932c9" containerID="f308a9cc3674a35be7ac9a711d76b7a98c6425f1d1f6a2e7357a12ddd26ed707" exitCode=0 Dec 13 07:30:20 crc kubenswrapper[5048]: I1213 07:30:20.542645 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-n5jqk" event={"ID":"6f615071-be66-410f-a5fd-41422b9932c9","Type":"ContainerDied","Data":"f308a9cc3674a35be7ac9a711d76b7a98c6425f1d1f6a2e7357a12ddd26ed707"} Dec 13 07:30:20 crc kubenswrapper[5048]: I1213 07:30:20.580709 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/fcb618f5-8797-45ae-bdcc-04027097b73d-host\") pod \"crc-debug-sc6dr\" (UID: \"fcb618f5-8797-45ae-bdcc-04027097b73d\") " pod="openshift-must-gather-n9sbw/crc-debug-sc6dr" Dec 13 07:30:20 crc kubenswrapper[5048]: I1213 07:30:20.580822 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wgbbw\" (UniqueName: \"kubernetes.io/projected/fcb618f5-8797-45ae-bdcc-04027097b73d-kube-api-access-wgbbw\") pod \"crc-debug-sc6dr\" (UID: \"fcb618f5-8797-45ae-bdcc-04027097b73d\") " pod="openshift-must-gather-n9sbw/crc-debug-sc6dr" Dec 13 07:30:20 crc kubenswrapper[5048]: I1213 07:30:20.581316 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/fcb618f5-8797-45ae-bdcc-04027097b73d-host\") pod \"crc-debug-sc6dr\" (UID: \"fcb618f5-8797-45ae-bdcc-04027097b73d\") " pod="openshift-must-gather-n9sbw/crc-debug-sc6dr" Dec 13 07:30:20 crc kubenswrapper[5048]: I1213 07:30:20.616505 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wgbbw\" (UniqueName: \"kubernetes.io/projected/fcb618f5-8797-45ae-bdcc-04027097b73d-kube-api-access-wgbbw\") pod \"crc-debug-sc6dr\" (UID: \"fcb618f5-8797-45ae-bdcc-04027097b73d\") " pod="openshift-must-gather-n9sbw/crc-debug-sc6dr" Dec 13 07:30:20 crc kubenswrapper[5048]: I1213 07:30:20.664580 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-n9sbw/crc-debug-sc6dr" Dec 13 07:30:20 crc kubenswrapper[5048]: W1213 07:30:20.693468 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfcb618f5_8797_45ae_bdcc_04027097b73d.slice/crio-0733a90566fba29fb12ef211f425be299d6a52b9786dfe0341fdbc39def451c5 WatchSource:0}: Error finding container 0733a90566fba29fb12ef211f425be299d6a52b9786dfe0341fdbc39def451c5: Status 404 returned error can't find the container with id 0733a90566fba29fb12ef211f425be299d6a52b9786dfe0341fdbc39def451c5 Dec 13 07:30:21 crc kubenswrapper[5048]: I1213 07:30:21.556663 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-n9sbw/crc-debug-sc6dr" event={"ID":"fcb618f5-8797-45ae-bdcc-04027097b73d","Type":"ContainerStarted","Data":"0733a90566fba29fb12ef211f425be299d6a52b9786dfe0341fdbc39def451c5"} Dec 13 07:30:24 crc kubenswrapper[5048]: I1213 07:30:24.588128 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-n5jqk" event={"ID":"6f615071-be66-410f-a5fd-41422b9932c9","Type":"ContainerStarted","Data":"a0e27c89ce12012f11d3b347c8251e6a9e1341964d986629a6f8ee858004f795"} Dec 13 07:30:24 crc kubenswrapper[5048]: I1213 07:30:24.617567 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-n5jqk" podStartSLOduration=7.663876019 podStartE2EDuration="13.617549801s" podCreationTimestamp="2025-12-13 07:30:11 +0000 UTC" firstStartedPulling="2025-12-13 07:30:17.503347534 +0000 UTC m=+3651.369942155" lastFinishedPulling="2025-12-13 07:30:23.457021356 +0000 UTC m=+3657.323615937" observedRunningTime="2025-12-13 07:30:24.606149842 +0000 UTC m=+3658.472744423" watchObservedRunningTime="2025-12-13 07:30:24.617549801 +0000 UTC m=+3658.484144382" Dec 13 07:30:31 crc kubenswrapper[5048]: I1213 07:30:31.804510 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-n5jqk" Dec 13 07:30:31 crc kubenswrapper[5048]: I1213 07:30:31.805997 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-n5jqk" Dec 13 07:30:32 crc kubenswrapper[5048]: I1213 07:30:32.859532 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-n5jqk" podUID="6f615071-be66-410f-a5fd-41422b9932c9" containerName="registry-server" probeResult="failure" output=< Dec 13 07:30:32 crc kubenswrapper[5048]: timeout: failed to connect service ":50051" within 1s Dec 13 07:30:32 crc kubenswrapper[5048]: > Dec 13 07:30:36 crc kubenswrapper[5048]: I1213 07:30:36.964858 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-n9sbw/crc-debug-sc6dr" event={"ID":"fcb618f5-8797-45ae-bdcc-04027097b73d","Type":"ContainerStarted","Data":"c9aab5d85a946dc914c5d7b490c14fc6565c4ae081abeb8832653f596f21dd2c"} Dec 13 07:30:36 crc kubenswrapper[5048]: I1213 07:30:36.985865 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-n9sbw/crc-debug-sc6dr" podStartSLOduration=1.996484726 podStartE2EDuration="16.985850049s" podCreationTimestamp="2025-12-13 07:30:20 +0000 UTC" firstStartedPulling="2025-12-13 07:30:20.697937708 +0000 UTC m=+3654.564532279" lastFinishedPulling="2025-12-13 07:30:35.687303011 +0000 UTC m=+3669.553897602" observedRunningTime="2025-12-13 07:30:36.983409283 +0000 UTC m=+3670.850003864" 
watchObservedRunningTime="2025-12-13 07:30:36.985850049 +0000 UTC m=+3670.852444630" Dec 13 07:30:42 crc kubenswrapper[5048]: I1213 07:30:42.883925 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-n5jqk" podUID="6f615071-be66-410f-a5fd-41422b9932c9" containerName="registry-server" probeResult="failure" output=< Dec 13 07:30:42 crc kubenswrapper[5048]: timeout: failed to connect service ":50051" within 1s Dec 13 07:30:42 crc kubenswrapper[5048]: > Dec 13 07:30:46 crc kubenswrapper[5048]: I1213 07:30:46.216068 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 13 07:30:46 crc kubenswrapper[5048]: I1213 07:30:46.216847 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 13 07:30:51 crc kubenswrapper[5048]: I1213 07:30:51.857165 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-n5jqk" Dec 13 07:30:51 crc kubenswrapper[5048]: I1213 07:30:51.913924 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-n5jqk" Dec 13 07:30:52 crc kubenswrapper[5048]: I1213 07:30:52.092051 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-n5jqk"] Dec 13 07:30:53 crc kubenswrapper[5048]: I1213 07:30:53.111334 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-n5jqk" podUID="6f615071-be66-410f-a5fd-41422b9932c9" containerName="registry-server" containerID="cri-o://a0e27c89ce12012f11d3b347c8251e6a9e1341964d986629a6f8ee858004f795" gracePeriod=2 Dec 13 07:30:53 crc kubenswrapper[5048]: I1213 07:30:53.627417 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-n5jqk" Dec 13 07:30:53 crc kubenswrapper[5048]: I1213 07:30:53.798443 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9hn5h\" (UniqueName: \"kubernetes.io/projected/6f615071-be66-410f-a5fd-41422b9932c9-kube-api-access-9hn5h\") pod \"6f615071-be66-410f-a5fd-41422b9932c9\" (UID: \"6f615071-be66-410f-a5fd-41422b9932c9\") " Dec 13 07:30:53 crc kubenswrapper[5048]: I1213 07:30:53.798838 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f615071-be66-410f-a5fd-41422b9932c9-catalog-content\") pod \"6f615071-be66-410f-a5fd-41422b9932c9\" (UID: \"6f615071-be66-410f-a5fd-41422b9932c9\") " Dec 13 07:30:53 crc kubenswrapper[5048]: I1213 07:30:53.798913 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f615071-be66-410f-a5fd-41422b9932c9-utilities\") pod \"6f615071-be66-410f-a5fd-41422b9932c9\" (UID: \"6f615071-be66-410f-a5fd-41422b9932c9\") " Dec 13 07:30:53 crc kubenswrapper[5048]: I1213 07:30:53.799680 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6f615071-be66-410f-a5fd-41422b9932c9-utilities" (OuterVolumeSpecName: "utilities") pod "6f615071-be66-410f-a5fd-41422b9932c9" (UID: "6f615071-be66-410f-a5fd-41422b9932c9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 07:30:53 crc kubenswrapper[5048]: I1213 07:30:53.808388 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f615071-be66-410f-a5fd-41422b9932c9-kube-api-access-9hn5h" (OuterVolumeSpecName: "kube-api-access-9hn5h") pod "6f615071-be66-410f-a5fd-41422b9932c9" (UID: "6f615071-be66-410f-a5fd-41422b9932c9"). InnerVolumeSpecName "kube-api-access-9hn5h". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 07:30:53 crc kubenswrapper[5048]: I1213 07:30:53.901477 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9hn5h\" (UniqueName: \"kubernetes.io/projected/6f615071-be66-410f-a5fd-41422b9932c9-kube-api-access-9hn5h\") on node \"crc\" DevicePath \"\"" Dec 13 07:30:53 crc kubenswrapper[5048]: I1213 07:30:53.901503 5048 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f615071-be66-410f-a5fd-41422b9932c9-utilities\") on node \"crc\" DevicePath \"\"" Dec 13 07:30:53 crc kubenswrapper[5048]: I1213 07:30:53.917798 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6f615071-be66-410f-a5fd-41422b9932c9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6f615071-be66-410f-a5fd-41422b9932c9" (UID: "6f615071-be66-410f-a5fd-41422b9932c9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 07:30:54 crc kubenswrapper[5048]: I1213 07:30:54.003623 5048 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f615071-be66-410f-a5fd-41422b9932c9-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 13 07:30:54 crc kubenswrapper[5048]: I1213 07:30:54.120098 5048 generic.go:334] "Generic (PLEG): container finished" podID="6f615071-be66-410f-a5fd-41422b9932c9" containerID="a0e27c89ce12012f11d3b347c8251e6a9e1341964d986629a6f8ee858004f795" exitCode=0 Dec 13 07:30:54 crc kubenswrapper[5048]: I1213 07:30:54.120172 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-n5jqk" Dec 13 07:30:54 crc kubenswrapper[5048]: I1213 07:30:54.120220 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-n5jqk" event={"ID":"6f615071-be66-410f-a5fd-41422b9932c9","Type":"ContainerDied","Data":"a0e27c89ce12012f11d3b347c8251e6a9e1341964d986629a6f8ee858004f795"} Dec 13 07:30:54 crc kubenswrapper[5048]: I1213 07:30:54.121342 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-n5jqk" event={"ID":"6f615071-be66-410f-a5fd-41422b9932c9","Type":"ContainerDied","Data":"5b789ebafaf50b34f619d259aad8dfeb3c039e2a3b677bcb809ca57f3f164a24"} Dec 13 07:30:54 crc kubenswrapper[5048]: I1213 07:30:54.121372 5048 scope.go:117] "RemoveContainer" containerID="a0e27c89ce12012f11d3b347c8251e6a9e1341964d986629a6f8ee858004f795" Dec 13 07:30:54 crc kubenswrapper[5048]: I1213 07:30:54.647303 5048 scope.go:117] "RemoveContainer" containerID="6740dc6014628f40d21daa469d719043dea8fa64585837e235a1362d71ce843b" Dec 13 07:30:55 crc kubenswrapper[5048]: I1213 07:30:55.688794 5048 scope.go:117] "RemoveContainer" containerID="f308a9cc3674a35be7ac9a711d76b7a98c6425f1d1f6a2e7357a12ddd26ed707" Dec 13 07:30:55 crc kubenswrapper[5048]: I1213 07:30:55.735220 5048 scope.go:117] "RemoveContainer" containerID="47818e8fc67f6258191dca735c5a26b1f14a3e7e973843d777740beb9991ed87" Dec 13 07:30:55 crc kubenswrapper[5048]: I1213 07:30:55.771520 5048 scope.go:117] "RemoveContainer" containerID="a0e27c89ce12012f11d3b347c8251e6a9e1341964d986629a6f8ee858004f795" Dec 13 07:30:55 crc kubenswrapper[5048]: E1213 07:30:55.773188 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a0e27c89ce12012f11d3b347c8251e6a9e1341964d986629a6f8ee858004f795\": container with ID starting with a0e27c89ce12012f11d3b347c8251e6a9e1341964d986629a6f8ee858004f795 not found: ID does not exist" containerID="a0e27c89ce12012f11d3b347c8251e6a9e1341964d986629a6f8ee858004f795" Dec 13 07:30:55 crc kubenswrapper[5048]: I1213 07:30:55.773221 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a0e27c89ce12012f11d3b347c8251e6a9e1341964d986629a6f8ee858004f795"} err="failed to get container status \"a0e27c89ce12012f11d3b347c8251e6a9e1341964d986629a6f8ee858004f795\": rpc error: code = NotFound desc = could not find container \"a0e27c89ce12012f11d3b347c8251e6a9e1341964d986629a6f8ee858004f795\": container with ID starting with a0e27c89ce12012f11d3b347c8251e6a9e1341964d986629a6f8ee858004f795 not found: ID does not exist" Dec 13 07:30:55 crc kubenswrapper[5048]: I1213 07:30:55.773243 5048 scope.go:117] "RemoveContainer" containerID="f308a9cc3674a35be7ac9a711d76b7a98c6425f1d1f6a2e7357a12ddd26ed707" Dec 13 
07:30:55 crc kubenswrapper[5048]: E1213 07:30:55.773729 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f308a9cc3674a35be7ac9a711d76b7a98c6425f1d1f6a2e7357a12ddd26ed707\": container with ID starting with f308a9cc3674a35be7ac9a711d76b7a98c6425f1d1f6a2e7357a12ddd26ed707 not found: ID does not exist" containerID="f308a9cc3674a35be7ac9a711d76b7a98c6425f1d1f6a2e7357a12ddd26ed707" Dec 13 07:30:55 crc kubenswrapper[5048]: I1213 07:30:55.773776 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f308a9cc3674a35be7ac9a711d76b7a98c6425f1d1f6a2e7357a12ddd26ed707"} err="failed to get container status \"f308a9cc3674a35be7ac9a711d76b7a98c6425f1d1f6a2e7357a12ddd26ed707\": rpc error: code = NotFound desc = could not find container \"f308a9cc3674a35be7ac9a711d76b7a98c6425f1d1f6a2e7357a12ddd26ed707\": container with ID starting with f308a9cc3674a35be7ac9a711d76b7a98c6425f1d1f6a2e7357a12ddd26ed707 not found: ID does not exist" Dec 13 07:30:55 crc kubenswrapper[5048]: I1213 07:30:55.773804 5048 scope.go:117] "RemoveContainer" containerID="47818e8fc67f6258191dca735c5a26b1f14a3e7e973843d777740beb9991ed87" Dec 13 07:30:55 crc kubenswrapper[5048]: E1213 07:30:55.774357 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"47818e8fc67f6258191dca735c5a26b1f14a3e7e973843d777740beb9991ed87\": container with ID starting with 47818e8fc67f6258191dca735c5a26b1f14a3e7e973843d777740beb9991ed87 not found: ID does not exist" containerID="47818e8fc67f6258191dca735c5a26b1f14a3e7e973843d777740beb9991ed87" Dec 13 07:30:55 crc kubenswrapper[5048]: I1213 07:30:55.774387 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"47818e8fc67f6258191dca735c5a26b1f14a3e7e973843d777740beb9991ed87"} err="failed to get container status \"47818e8fc67f6258191dca735c5a26b1f14a3e7e973843d777740beb9991ed87\": rpc error: code = NotFound desc = could not find container \"47818e8fc67f6258191dca735c5a26b1f14a3e7e973843d777740beb9991ed87\": container with ID starting with 47818e8fc67f6258191dca735c5a26b1f14a3e7e973843d777740beb9991ed87 not found: ID does not exist" Dec 13 07:31:16 crc kubenswrapper[5048]: I1213 07:31:16.216337 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 13 07:31:16 crc kubenswrapper[5048]: I1213 07:31:16.217410 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 13 07:31:16 crc kubenswrapper[5048]: I1213 07:31:16.217570 5048 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" Dec 13 07:31:16 crc kubenswrapper[5048]: I1213 07:31:16.218552 5048 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"816d7f6445997a7cd4a07b0fe58ad5d757b1c38f7ec8f2c73ef48fbeaa2dc8c7"} 
pod="openshift-machine-config-operator/machine-config-daemon-j7hns" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 13 07:31:16 crc kubenswrapper[5048]: I1213 07:31:16.218670 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" containerID="cri-o://816d7f6445997a7cd4a07b0fe58ad5d757b1c38f7ec8f2c73ef48fbeaa2dc8c7" gracePeriod=600 Dec 13 07:31:16 crc kubenswrapper[5048]: I1213 07:31:16.362386 5048 generic.go:334] "Generic (PLEG): container finished" podID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerID="816d7f6445997a7cd4a07b0fe58ad5d757b1c38f7ec8f2c73ef48fbeaa2dc8c7" exitCode=0 Dec 13 07:31:16 crc kubenswrapper[5048]: I1213 07:31:16.362479 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" event={"ID":"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b","Type":"ContainerDied","Data":"816d7f6445997a7cd4a07b0fe58ad5d757b1c38f7ec8f2c73ef48fbeaa2dc8c7"} Dec 13 07:31:16 crc kubenswrapper[5048]: I1213 07:31:16.362554 5048 scope.go:117] "RemoveContainer" containerID="305e2d72c42c0e511700afcae549613ac74af39a3f7fabb14c595f37c385a74b" Dec 13 07:31:16 crc kubenswrapper[5048]: E1213 07:31:16.485317 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:31:17 crc kubenswrapper[5048]: I1213 07:31:17.376798 5048 scope.go:117] "RemoveContainer" containerID="816d7f6445997a7cd4a07b0fe58ad5d757b1c38f7ec8f2c73ef48fbeaa2dc8c7" Dec 13 07:31:17 crc kubenswrapper[5048]: E1213 07:31:17.377367 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:31:20 crc kubenswrapper[5048]: I1213 07:31:20.423864 5048 generic.go:334] "Generic (PLEG): container finished" podID="fcb618f5-8797-45ae-bdcc-04027097b73d" containerID="c9aab5d85a946dc914c5d7b490c14fc6565c4ae081abeb8832653f596f21dd2c" exitCode=0 Dec 13 07:31:20 crc kubenswrapper[5048]: I1213 07:31:20.423916 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-n9sbw/crc-debug-sc6dr" event={"ID":"fcb618f5-8797-45ae-bdcc-04027097b73d","Type":"ContainerDied","Data":"c9aab5d85a946dc914c5d7b490c14fc6565c4ae081abeb8832653f596f21dd2c"} Dec 13 07:31:21 crc kubenswrapper[5048]: I1213 07:31:21.562865 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-n9sbw/crc-debug-sc6dr" Dec 13 07:31:21 crc kubenswrapper[5048]: I1213 07:31:21.618803 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-n9sbw/crc-debug-sc6dr"] Dec 13 07:31:21 crc kubenswrapper[5048]: I1213 07:31:21.627054 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wgbbw\" (UniqueName: \"kubernetes.io/projected/fcb618f5-8797-45ae-bdcc-04027097b73d-kube-api-access-wgbbw\") pod \"fcb618f5-8797-45ae-bdcc-04027097b73d\" (UID: \"fcb618f5-8797-45ae-bdcc-04027097b73d\") " Dec 13 07:31:21 crc kubenswrapper[5048]: I1213 07:31:21.627173 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/fcb618f5-8797-45ae-bdcc-04027097b73d-host\") pod \"fcb618f5-8797-45ae-bdcc-04027097b73d\" (UID: \"fcb618f5-8797-45ae-bdcc-04027097b73d\") " Dec 13 07:31:21 crc kubenswrapper[5048]: I1213 07:31:21.627598 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/fcb618f5-8797-45ae-bdcc-04027097b73d-host" (OuterVolumeSpecName: "host") pod "fcb618f5-8797-45ae-bdcc-04027097b73d" (UID: "fcb618f5-8797-45ae-bdcc-04027097b73d"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 13 07:31:21 crc kubenswrapper[5048]: I1213 07:31:21.634627 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-n9sbw/crc-debug-sc6dr"] Dec 13 07:31:21 crc kubenswrapper[5048]: I1213 07:31:21.637371 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fcb618f5-8797-45ae-bdcc-04027097b73d-kube-api-access-wgbbw" (OuterVolumeSpecName: "kube-api-access-wgbbw") pod "fcb618f5-8797-45ae-bdcc-04027097b73d" (UID: "fcb618f5-8797-45ae-bdcc-04027097b73d"). InnerVolumeSpecName "kube-api-access-wgbbw". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 07:31:21 crc kubenswrapper[5048]: I1213 07:31:21.730583 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wgbbw\" (UniqueName: \"kubernetes.io/projected/fcb618f5-8797-45ae-bdcc-04027097b73d-kube-api-access-wgbbw\") on node \"crc\" DevicePath \"\"" Dec 13 07:31:21 crc kubenswrapper[5048]: I1213 07:31:21.730635 5048 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/fcb618f5-8797-45ae-bdcc-04027097b73d-host\") on node \"crc\" DevicePath \"\"" Dec 13 07:31:22 crc kubenswrapper[5048]: I1213 07:31:22.452067 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0733a90566fba29fb12ef211f425be299d6a52b9786dfe0341fdbc39def451c5" Dec 13 07:31:22 crc kubenswrapper[5048]: I1213 07:31:22.452188 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-n9sbw/crc-debug-sc6dr"
Dec 13 07:31:22 crc kubenswrapper[5048]: I1213 07:31:22.584822 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fcb618f5-8797-45ae-bdcc-04027097b73d" path="/var/lib/kubelet/pods/fcb618f5-8797-45ae-bdcc-04027097b73d/volumes"
Dec 13 07:31:22 crc kubenswrapper[5048]: I1213 07:31:22.774639 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-n9sbw/crc-debug-zhf88"]
Dec 13 07:31:22 crc kubenswrapper[5048]: E1213 07:31:22.775092 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f615071-be66-410f-a5fd-41422b9932c9" containerName="extract-content"
Dec 13 07:31:22 crc kubenswrapper[5048]: I1213 07:31:22.775115 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f615071-be66-410f-a5fd-41422b9932c9" containerName="extract-content"
Dec 13 07:31:22 crc kubenswrapper[5048]: E1213 07:31:22.775136 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f615071-be66-410f-a5fd-41422b9932c9" containerName="extract-utilities"
Dec 13 07:31:22 crc kubenswrapper[5048]: I1213 07:31:22.775144 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f615071-be66-410f-a5fd-41422b9932c9" containerName="extract-utilities"
Dec 13 07:31:22 crc kubenswrapper[5048]: E1213 07:31:22.775175 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fcb618f5-8797-45ae-bdcc-04027097b73d" containerName="container-00"
Dec 13 07:31:22 crc kubenswrapper[5048]: I1213 07:31:22.775182 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="fcb618f5-8797-45ae-bdcc-04027097b73d" containerName="container-00"
Dec 13 07:31:22 crc kubenswrapper[5048]: E1213 07:31:22.775201 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f615071-be66-410f-a5fd-41422b9932c9" containerName="registry-server"
Dec 13 07:31:22 crc kubenswrapper[5048]: I1213 07:31:22.775208 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f615071-be66-410f-a5fd-41422b9932c9" containerName="registry-server"
Dec 13 07:31:22 crc kubenswrapper[5048]: I1213 07:31:22.775416 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f615071-be66-410f-a5fd-41422b9932c9" containerName="registry-server"
Dec 13 07:31:22 crc kubenswrapper[5048]: I1213 07:31:22.775462 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="fcb618f5-8797-45ae-bdcc-04027097b73d" containerName="container-00"
Dec 13 07:31:22 crc kubenswrapper[5048]: I1213 07:31:22.776180 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-n9sbw/crc-debug-zhf88"
Dec 13 07:31:22 crc kubenswrapper[5048]: I1213 07:31:22.860176 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/25b8f0e2-89cd-4d47-bc92-574b26b7405c-host\") pod \"crc-debug-zhf88\" (UID: \"25b8f0e2-89cd-4d47-bc92-574b26b7405c\") " pod="openshift-must-gather-n9sbw/crc-debug-zhf88"
Dec 13 07:31:22 crc kubenswrapper[5048]: I1213 07:31:22.860395 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m9mtr\" (UniqueName: \"kubernetes.io/projected/25b8f0e2-89cd-4d47-bc92-574b26b7405c-kube-api-access-m9mtr\") pod \"crc-debug-zhf88\" (UID: \"25b8f0e2-89cd-4d47-bc92-574b26b7405c\") " pod="openshift-must-gather-n9sbw/crc-debug-zhf88"
Dec 13 07:31:22 crc kubenswrapper[5048]: I1213 07:31:22.962699 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/25b8f0e2-89cd-4d47-bc92-574b26b7405c-host\") pod \"crc-debug-zhf88\" (UID: \"25b8f0e2-89cd-4d47-bc92-574b26b7405c\") " pod="openshift-must-gather-n9sbw/crc-debug-zhf88"
Dec 13 07:31:22 crc kubenswrapper[5048]: I1213 07:31:22.962915 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/25b8f0e2-89cd-4d47-bc92-574b26b7405c-host\") pod \"crc-debug-zhf88\" (UID: \"25b8f0e2-89cd-4d47-bc92-574b26b7405c\") " pod="openshift-must-gather-n9sbw/crc-debug-zhf88"
Dec 13 07:31:22 crc kubenswrapper[5048]: I1213 07:31:22.962979 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m9mtr\" (UniqueName: \"kubernetes.io/projected/25b8f0e2-89cd-4d47-bc92-574b26b7405c-kube-api-access-m9mtr\") pod \"crc-debug-zhf88\" (UID: \"25b8f0e2-89cd-4d47-bc92-574b26b7405c\") " pod="openshift-must-gather-n9sbw/crc-debug-zhf88"
Dec 13 07:31:22 crc kubenswrapper[5048]: I1213 07:31:22.986754 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m9mtr\" (UniqueName: \"kubernetes.io/projected/25b8f0e2-89cd-4d47-bc92-574b26b7405c-kube-api-access-m9mtr\") pod \"crc-debug-zhf88\" (UID: \"25b8f0e2-89cd-4d47-bc92-574b26b7405c\") " pod="openshift-must-gather-n9sbw/crc-debug-zhf88"
Dec 13 07:31:23 crc kubenswrapper[5048]: I1213 07:31:23.097617 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-n9sbw/crc-debug-zhf88"
Dec 13 07:31:23 crc kubenswrapper[5048]: I1213 07:31:23.467741 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-n9sbw/crc-debug-zhf88" event={"ID":"25b8f0e2-89cd-4d47-bc92-574b26b7405c","Type":"ContainerStarted","Data":"8083add98a8dba457cea4d79bf51e1d3f0ea72acc8392066fd6ab03d1fbf9b13"}
Dec 13 07:31:23 crc kubenswrapper[5048]: I1213 07:31:23.468087 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-n9sbw/crc-debug-zhf88" event={"ID":"25b8f0e2-89cd-4d47-bc92-574b26b7405c","Type":"ContainerStarted","Data":"cd20fcafa485890ae15519a8e9335f5e2de205ec15914b0e64dca45308b090fd"}
Dec 13 07:31:24 crc kubenswrapper[5048]: I1213 07:31:24.478079 5048 generic.go:334] "Generic (PLEG): container finished" podID="25b8f0e2-89cd-4d47-bc92-574b26b7405c" containerID="8083add98a8dba457cea4d79bf51e1d3f0ea72acc8392066fd6ab03d1fbf9b13" exitCode=0
Dec 13 07:31:24 crc kubenswrapper[5048]: I1213 07:31:24.478151 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-n9sbw/crc-debug-zhf88" event={"ID":"25b8f0e2-89cd-4d47-bc92-574b26b7405c","Type":"ContainerDied","Data":"8083add98a8dba457cea4d79bf51e1d3f0ea72acc8392066fd6ab03d1fbf9b13"}
Dec 13 07:31:25 crc kubenswrapper[5048]: I1213 07:31:25.624670 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-n9sbw/crc-debug-zhf88"
Dec 13 07:31:25 crc kubenswrapper[5048]: I1213 07:31:25.655113 5048 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","burstable","pod6f615071-be66-410f-a5fd-41422b9932c9"] err="unable to destroy cgroup paths for cgroup [kubepods burstable pod6f615071-be66-410f-a5fd-41422b9932c9] : Timed out while waiting for systemd to remove kubepods-burstable-pod6f615071_be66_410f_a5fd_41422b9932c9.slice"
Dec 13 07:31:25 crc kubenswrapper[5048]: E1213 07:31:25.655174 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to delete cgroup paths for [kubepods burstable pod6f615071-be66-410f-a5fd-41422b9932c9] : unable to destroy cgroup paths for cgroup [kubepods burstable pod6f615071-be66-410f-a5fd-41422b9932c9] : Timed out while waiting for systemd to remove kubepods-burstable-pod6f615071_be66_410f_a5fd_41422b9932c9.slice" pod="openshift-marketplace/redhat-operators-n5jqk" podUID="6f615071-be66-410f-a5fd-41422b9932c9"
Dec 13 07:31:25 crc kubenswrapper[5048]: I1213 07:31:25.665938 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-n9sbw/crc-debug-zhf88"]
Dec 13 07:31:25 crc kubenswrapper[5048]: I1213 07:31:25.677994 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-n9sbw/crc-debug-zhf88"]
Dec 13 07:31:25 crc kubenswrapper[5048]: I1213 07:31:25.713758 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m9mtr\" (UniqueName: \"kubernetes.io/projected/25b8f0e2-89cd-4d47-bc92-574b26b7405c-kube-api-access-m9mtr\") pod \"25b8f0e2-89cd-4d47-bc92-574b26b7405c\" (UID: \"25b8f0e2-89cd-4d47-bc92-574b26b7405c\") "
Dec 13 07:31:25 crc kubenswrapper[5048]: I1213 07:31:25.714002 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/25b8f0e2-89cd-4d47-bc92-574b26b7405c-host\") pod \"25b8f0e2-89cd-4d47-bc92-574b26b7405c\" (UID: \"25b8f0e2-89cd-4d47-bc92-574b26b7405c\") "
Dec 13 07:31:25 crc kubenswrapper[5048]: I1213 07:31:25.714150 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/25b8f0e2-89cd-4d47-bc92-574b26b7405c-host" (OuterVolumeSpecName: "host") pod "25b8f0e2-89cd-4d47-bc92-574b26b7405c" (UID: "25b8f0e2-89cd-4d47-bc92-574b26b7405c"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 13 07:31:25 crc kubenswrapper[5048]: I1213 07:31:25.714612 5048 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/25b8f0e2-89cd-4d47-bc92-574b26b7405c-host\") on node \"crc\" DevicePath \"\""
Dec 13 07:31:25 crc kubenswrapper[5048]: I1213 07:31:25.719165 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25b8f0e2-89cd-4d47-bc92-574b26b7405c-kube-api-access-m9mtr" (OuterVolumeSpecName: "kube-api-access-m9mtr") pod "25b8f0e2-89cd-4d47-bc92-574b26b7405c" (UID: "25b8f0e2-89cd-4d47-bc92-574b26b7405c"). InnerVolumeSpecName "kube-api-access-m9mtr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 07:31:25 crc kubenswrapper[5048]: I1213 07:31:25.815347 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m9mtr\" (UniqueName: \"kubernetes.io/projected/25b8f0e2-89cd-4d47-bc92-574b26b7405c-kube-api-access-m9mtr\") on node \"crc\" DevicePath \"\""
Dec 13 07:31:26 crc kubenswrapper[5048]: I1213 07:31:26.521500 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cd20fcafa485890ae15519a8e9335f5e2de205ec15914b0e64dca45308b090fd"
Dec 13 07:31:26 crc kubenswrapper[5048]: I1213 07:31:26.521550 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-n5jqk"
Dec 13 07:31:26 crc kubenswrapper[5048]: I1213 07:31:26.521550 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-n9sbw/crc-debug-zhf88"
Dec 13 07:31:26 crc kubenswrapper[5048]: I1213 07:31:26.561748 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-n5jqk"]
Dec 13 07:31:26 crc kubenswrapper[5048]: I1213 07:31:26.586795 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25b8f0e2-89cd-4d47-bc92-574b26b7405c" path="/var/lib/kubelet/pods/25b8f0e2-89cd-4d47-bc92-574b26b7405c/volumes"
Dec 13 07:31:26 crc kubenswrapper[5048]: I1213 07:31:26.587695 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-n5jqk"]
Dec 13 07:31:26 crc kubenswrapper[5048]: I1213 07:31:26.822800 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-n9sbw/crc-debug-nbddw"]
Dec 13 07:31:26 crc kubenswrapper[5048]: E1213 07:31:26.823246 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25b8f0e2-89cd-4d47-bc92-574b26b7405c" containerName="container-00"
Dec 13 07:31:26 crc kubenswrapper[5048]: I1213 07:31:26.823262 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="25b8f0e2-89cd-4d47-bc92-574b26b7405c" containerName="container-00"
Dec 13 07:31:26 crc kubenswrapper[5048]: I1213 07:31:26.823568 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="25b8f0e2-89cd-4d47-bc92-574b26b7405c" containerName="container-00"
Dec 13 07:31:26 crc kubenswrapper[5048]: I1213 07:31:26.824278 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-n9sbw/crc-debug-nbddw"
Dec 13 07:31:26 crc kubenswrapper[5048]: I1213 07:31:26.936743 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vstz7\" (UniqueName: \"kubernetes.io/projected/ad02faeb-a1ee-45a4-924c-83c1f17df2f4-kube-api-access-vstz7\") pod \"crc-debug-nbddw\" (UID: \"ad02faeb-a1ee-45a4-924c-83c1f17df2f4\") " pod="openshift-must-gather-n9sbw/crc-debug-nbddw"
Dec 13 07:31:26 crc kubenswrapper[5048]: I1213 07:31:26.937220 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ad02faeb-a1ee-45a4-924c-83c1f17df2f4-host\") pod \"crc-debug-nbddw\" (UID: \"ad02faeb-a1ee-45a4-924c-83c1f17df2f4\") " pod="openshift-must-gather-n9sbw/crc-debug-nbddw"
Dec 13 07:31:27 crc kubenswrapper[5048]: I1213 07:31:27.039281 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vstz7\" (UniqueName: \"kubernetes.io/projected/ad02faeb-a1ee-45a4-924c-83c1f17df2f4-kube-api-access-vstz7\") pod \"crc-debug-nbddw\" (UID: \"ad02faeb-a1ee-45a4-924c-83c1f17df2f4\") " pod="openshift-must-gather-n9sbw/crc-debug-nbddw"
Dec 13 07:31:27 crc kubenswrapper[5048]: I1213 07:31:27.039420 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ad02faeb-a1ee-45a4-924c-83c1f17df2f4-host\") pod \"crc-debug-nbddw\" (UID: \"ad02faeb-a1ee-45a4-924c-83c1f17df2f4\") " pod="openshift-must-gather-n9sbw/crc-debug-nbddw"
Dec 13 07:31:27 crc kubenswrapper[5048]: I1213 07:31:27.039624 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ad02faeb-a1ee-45a4-924c-83c1f17df2f4-host\") pod \"crc-debug-nbddw\" (UID: \"ad02faeb-a1ee-45a4-924c-83c1f17df2f4\") " pod="openshift-must-gather-n9sbw/crc-debug-nbddw"
Dec 13 07:31:27 crc kubenswrapper[5048]: I1213 07:31:27.073431 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vstz7\" (UniqueName: \"kubernetes.io/projected/ad02faeb-a1ee-45a4-924c-83c1f17df2f4-kube-api-access-vstz7\") pod \"crc-debug-nbddw\" (UID: \"ad02faeb-a1ee-45a4-924c-83c1f17df2f4\") " pod="openshift-must-gather-n9sbw/crc-debug-nbddw"
Dec 13 07:31:27 crc kubenswrapper[5048]: I1213 07:31:27.153605 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-n9sbw/crc-debug-nbddw"
Dec 13 07:31:27 crc kubenswrapper[5048]: I1213 07:31:27.536871 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-n9sbw/crc-debug-nbddw" event={"ID":"ad02faeb-a1ee-45a4-924c-83c1f17df2f4","Type":"ContainerStarted","Data":"356d0b06ea8e8ec5998a15253a903b0028a7594fa2f2d2d68750562fae88eee2"}
Dec 13 07:31:27 crc kubenswrapper[5048]: I1213 07:31:27.566928 5048 scope.go:117] "RemoveContainer" containerID="816d7f6445997a7cd4a07b0fe58ad5d757b1c38f7ec8f2c73ef48fbeaa2dc8c7"
Dec 13 07:31:27 crc kubenswrapper[5048]: E1213 07:31:27.567570 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b"
Dec 13 07:31:28 crc kubenswrapper[5048]: I1213 07:31:28.547174 5048 generic.go:334] "Generic (PLEG): container finished" podID="ad02faeb-a1ee-45a4-924c-83c1f17df2f4" containerID="045759590f512e4aa3e534f8146dfe86d069b93d731677a25882ff18aae502fe" exitCode=0
Dec 13 07:31:28 crc kubenswrapper[5048]: I1213 07:31:28.547223 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-n9sbw/crc-debug-nbddw" event={"ID":"ad02faeb-a1ee-45a4-924c-83c1f17df2f4","Type":"ContainerDied","Data":"045759590f512e4aa3e534f8146dfe86d069b93d731677a25882ff18aae502fe"}
Dec 13 07:31:28 crc kubenswrapper[5048]: I1213 07:31:28.582303 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f615071-be66-410f-a5fd-41422b9932c9" path="/var/lib/kubelet/pods/6f615071-be66-410f-a5fd-41422b9932c9/volumes"
Dec 13 07:31:28 crc kubenswrapper[5048]: I1213 07:31:28.592558 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-n9sbw/crc-debug-nbddw"]
Dec 13 07:31:28 crc kubenswrapper[5048]: I1213 07:31:28.601928 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-n9sbw/crc-debug-nbddw"]
Dec 13 07:31:29 crc kubenswrapper[5048]: I1213 07:31:29.668194 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-n9sbw/crc-debug-nbddw"
Dec 13 07:31:29 crc kubenswrapper[5048]: I1213 07:31:29.687311 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vstz7\" (UniqueName: \"kubernetes.io/projected/ad02faeb-a1ee-45a4-924c-83c1f17df2f4-kube-api-access-vstz7\") pod \"ad02faeb-a1ee-45a4-924c-83c1f17df2f4\" (UID: \"ad02faeb-a1ee-45a4-924c-83c1f17df2f4\") "
Dec 13 07:31:29 crc kubenswrapper[5048]: I1213 07:31:29.687425 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ad02faeb-a1ee-45a4-924c-83c1f17df2f4-host\") pod \"ad02faeb-a1ee-45a4-924c-83c1f17df2f4\" (UID: \"ad02faeb-a1ee-45a4-924c-83c1f17df2f4\") "
Dec 13 07:31:29 crc kubenswrapper[5048]: I1213 07:31:29.687921 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ad02faeb-a1ee-45a4-924c-83c1f17df2f4-host" (OuterVolumeSpecName: "host") pod "ad02faeb-a1ee-45a4-924c-83c1f17df2f4" (UID: "ad02faeb-a1ee-45a4-924c-83c1f17df2f4"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 13 07:31:29 crc kubenswrapper[5048]: I1213 07:31:29.693255 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad02faeb-a1ee-45a4-924c-83c1f17df2f4-kube-api-access-vstz7" (OuterVolumeSpecName: "kube-api-access-vstz7") pod "ad02faeb-a1ee-45a4-924c-83c1f17df2f4" (UID: "ad02faeb-a1ee-45a4-924c-83c1f17df2f4"). InnerVolumeSpecName "kube-api-access-vstz7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 07:31:29 crc kubenswrapper[5048]: I1213 07:31:29.789472 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vstz7\" (UniqueName: \"kubernetes.io/projected/ad02faeb-a1ee-45a4-924c-83c1f17df2f4-kube-api-access-vstz7\") on node \"crc\" DevicePath \"\""
Dec 13 07:31:29 crc kubenswrapper[5048]: I1213 07:31:29.789505 5048 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ad02faeb-a1ee-45a4-924c-83c1f17df2f4-host\") on node \"crc\" DevicePath \"\""
Dec 13 07:31:30 crc kubenswrapper[5048]: I1213 07:31:30.571468 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-n9sbw/crc-debug-nbddw"
Dec 13 07:31:30 crc kubenswrapper[5048]: I1213 07:31:30.577661 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad02faeb-a1ee-45a4-924c-83c1f17df2f4" path="/var/lib/kubelet/pods/ad02faeb-a1ee-45a4-924c-83c1f17df2f4/volumes"
Dec 13 07:31:30 crc kubenswrapper[5048]: I1213 07:31:30.578367 5048 scope.go:117] "RemoveContainer" containerID="045759590f512e4aa3e534f8146dfe86d069b93d731677a25882ff18aae502fe"
Dec 13 07:31:41 crc kubenswrapper[5048]: I1213 07:31:41.567058 5048 scope.go:117] "RemoveContainer" containerID="816d7f6445997a7cd4a07b0fe58ad5d757b1c38f7ec8f2c73ef48fbeaa2dc8c7"
Dec 13 07:31:41 crc kubenswrapper[5048]: E1213 07:31:41.567973 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b"
Dec 13 07:31:45 crc kubenswrapper[5048]: I1213 07:31:45.793425 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-94459c6fd-6dkbd_dca6704a-bfa6-42db-9692-f6b21a2c9e08/barbican-api/0.log"
Dec 13 07:31:45 crc kubenswrapper[5048]: I1213 07:31:45.903483 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-94459c6fd-6dkbd_dca6704a-bfa6-42db-9692-f6b21a2c9e08/barbican-api-log/0.log"
Dec 13 07:31:45 crc kubenswrapper[5048]: I1213 07:31:45.948806 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-7dd9cf6646-xj92w_ff1df1f6-e2a6-4e70-b352-f3b15b9255d7/barbican-keystone-listener/0.log"
Dec 13 07:31:45 crc kubenswrapper[5048]: I1213 07:31:45.980702 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-7dd9cf6646-xj92w_ff1df1f6-e2a6-4e70-b352-f3b15b9255d7/barbican-keystone-listener-log/0.log"
Dec 13 07:31:46 crc kubenswrapper[5048]: I1213 07:31:46.108073 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-7888dc9665-xrmff_ec726365-6964-4e11-942d-d57482573f01/barbican-worker/0.log"
Dec 13 07:31:46 crc kubenswrapper[5048]: I1213 07:31:46.191885 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-7888dc9665-xrmff_ec726365-6964-4e11-942d-d57482573f01/barbican-worker-log/0.log"
Dec 13 07:31:46 crc kubenswrapper[5048]: I1213 07:31:46.301158 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-4flcx_868a604c-3a79-4945-b54e-950797bed05d/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 13 07:31:46 crc kubenswrapper[5048]: I1213 07:31:46.367707 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_a6d85513-7b75-40e7-9eae-08544cccbc55/ceilometer-central-agent/0.log"
Dec 13 07:31:46 crc kubenswrapper[5048]: I1213 07:31:46.452666 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_a6d85513-7b75-40e7-9eae-08544cccbc55/ceilometer-notification-agent/0.log"
Dec 13 07:31:46 crc kubenswrapper[5048]: I1213 07:31:46.482833 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_a6d85513-7b75-40e7-9eae-08544cccbc55/proxy-httpd/0.log"
Dec 13 07:31:46 crc kubenswrapper[5048]: I1213 07:31:46.557511 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_a6d85513-7b75-40e7-9eae-08544cccbc55/sg-core/0.log"
Dec 13 07:31:46 crc kubenswrapper[5048]: I1213 07:31:46.664848 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_2341822f-d44f-47a2-a543-655dc0b26866/cinder-api-log/0.log"
Dec 13 07:31:46 crc kubenswrapper[5048]: I1213 07:31:46.690047 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_2341822f-d44f-47a2-a543-655dc0b26866/cinder-api/0.log"
Dec 13 07:31:46 crc kubenswrapper[5048]: I1213 07:31:46.850503 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_e215aafd-55f0-449e-9886-fa2b93d7fd83/cinder-scheduler/0.log"
Dec 13 07:31:46 crc kubenswrapper[5048]: I1213 07:31:46.934662 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_e215aafd-55f0-449e-9886-fa2b93d7fd83/probe/0.log"
Dec 13 07:31:47 crc kubenswrapper[5048]: I1213 07:31:47.012991 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-vhsrc_7df2d78c-502d-4d7c-9233-cf01992cab77/configure-network-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 13 07:31:47 crc kubenswrapper[5048]: I1213 07:31:47.195592 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-wcwx7_cfd8e6ef-8724-4363-8a56-71f2b0f24f15/configure-os-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 13 07:31:47 crc kubenswrapper[5048]: I1213 07:31:47.212860 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-78c64bc9c5-zws2g_40c89533-f85e-4c8f-9826-f1affe855947/init/0.log"
Dec 13 07:31:47 crc kubenswrapper[5048]: I1213 07:31:47.402610 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-78c64bc9c5-zws2g_40c89533-f85e-4c8f-9826-f1affe855947/init/0.log"
Dec 13 07:31:47 crc kubenswrapper[5048]: I1213 07:31:47.410898 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-78c64bc9c5-zws2g_40c89533-f85e-4c8f-9826-f1affe855947/dnsmasq-dns/0.log"
Dec 13 07:31:47 crc kubenswrapper[5048]: I1213 07:31:47.436243 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-j46dh_78eb5a3b-7802-4507-bb35-37cc2e8edb56/download-cache-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 13 07:31:47 crc kubenswrapper[5048]: I1213 07:31:47.582286 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_c33951ff-f856-420b-90e1-6f776931b17e/glance-httpd/0.log"
Dec 13 07:31:47 crc kubenswrapper[5048]: I1213 07:31:47.655396 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_c33951ff-f856-420b-90e1-6f776931b17e/glance-log/0.log"
Dec 13 07:31:47 crc kubenswrapper[5048]: I1213 07:31:47.799271 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_be68c40d-a83c-40f4-ab3b-4f50f64aae15/glance-log/0.log"
Dec 13 07:31:47 crc kubenswrapper[5048]: I1213 07:31:47.805806 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_be68c40d-a83c-40f4-ab3b-4f50f64aae15/glance-httpd/0.log"
Dec 13 07:31:48 crc kubenswrapper[5048]: I1213 07:31:48.010796 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-657fc95f76-vznd4_1a49463c-d974-4631-b6ef-3f88d734ac2d/horizon/0.log"
Dec 13 07:31:48 crc kubenswrapper[5048]: I1213 07:31:48.147669 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz_a6c582a0-39fc-4d6c-aa84-367f11c30ff1/install-certs-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 13 07:31:48 crc kubenswrapper[5048]: I1213 07:31:48.401005 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-657fc95f76-vznd4_1a49463c-d974-4631-b6ef-3f88d734ac2d/horizon-log/0.log"
Dec 13 07:31:48 crc kubenswrapper[5048]: I1213 07:31:48.437276 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-5bk98_58f90e59-a2c6-4099-b5eb-6a35c0448a1f/install-os-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 13 07:31:48 crc kubenswrapper[5048]: I1213 07:31:48.583996 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-5c89574bc9-gw9cl_d339d78d-798f-4147-85f3-87e7a05515dc/keystone-api/0.log"
Dec 13 07:31:48 crc kubenswrapper[5048]: I1213 07:31:48.629124 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29426821-gd94t_6748fa72-aa27-4717-9434-2a06950c519a/keystone-cron/0.log"
Dec 13 07:31:48 crc kubenswrapper[5048]: I1213 07:31:48.989865 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_c1e7f131-1d2e-41de-8cba-e54b383324c5/kube-state-metrics/0.log"
Dec 13 07:31:49 crc kubenswrapper[5048]: I1213 07:31:49.033187 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-47k9k_482402ba-adeb-4175-911a-2ab863e44d4e/libvirt-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 13 07:31:49 crc kubenswrapper[5048]: I1213 07:31:49.402075 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-6465fd554f-k9lkr_4a6223fc-c3a5-462d-b61d-ebd353bbe7ca/neutron-api/0.log"
Dec 13 07:31:49 crc kubenswrapper[5048]: I1213 07:31:49.456747 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-6465fd554f-k9lkr_4a6223fc-c3a5-462d-b61d-ebd353bbe7ca/neutron-httpd/0.log"
Dec 13 07:31:49 crc kubenswrapper[5048]: I1213 07:31:49.615337 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb_967349d1-5d27-480d-8e31-2eaa33e3c7e0/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 13 07:31:50 crc kubenswrapper[5048]: I1213 07:31:50.117552 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_1e4cb8ce-de40-4d30-beb4-fa5a0dae4e1e/nova-cell0-conductor-conductor/0.log"
Dec 13 07:31:50 crc kubenswrapper[5048]: I1213 07:31:50.122922 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_40f60c37-7e8e-4a30-9d38-26296975a60c/nova-api-log/0.log"
Dec 13 07:31:50 crc kubenswrapper[5048]: I1213 07:31:50.286618 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_40f60c37-7e8e-4a30-9d38-26296975a60c/nova-api-api/0.log"
Dec 13 07:31:50 crc kubenswrapper[5048]: I1213 07:31:50.400477 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_219c43b9-12af-4e67-9c5e-93b2e83623b1/nova-cell1-conductor-conductor/0.log"
Dec 13 07:31:50 crc kubenswrapper[5048]: I1213 07:31:50.444720 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_6646f183-ffd9-4870-a202-85003939acd6/nova-cell1-novncproxy-novncproxy/0.log"
Dec 13 07:31:50 crc kubenswrapper[5048]: I1213 07:31:50.582877 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-flsxx_e460e258-aa7f-4839-9443-50b9afe4557b/nova-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 13 07:31:50 crc kubenswrapper[5048]: I1213 07:31:50.759772 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_b9502006-ca4a-4a71-90ef-4f86311c70fc/nova-metadata-log/0.log"
Dec 13 07:31:50 crc kubenswrapper[5048]: I1213 07:31:50.996101 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_bf17e7ba-3f68-4aca-ae8e-fb82f10f18d4/nova-scheduler-scheduler/0.log"
Dec 13 07:31:51 crc kubenswrapper[5048]: I1213 07:31:51.054533 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_be46506c-41d0-4b9f-92bd-f34eb6d6a1aa/mysql-bootstrap/0.log"
Dec 13 07:31:51 crc kubenswrapper[5048]: I1213 07:31:51.226701 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_be46506c-41d0-4b9f-92bd-f34eb6d6a1aa/mysql-bootstrap/0.log"
Dec 13 07:31:51 crc kubenswrapper[5048]: I1213 07:31:51.384698 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_be46506c-41d0-4b9f-92bd-f34eb6d6a1aa/galera/0.log"
Dec 13 07:31:51 crc kubenswrapper[5048]: I1213 07:31:51.486153 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_a5870a17-c845-46b8-a25c-8f8822a93cb8/mysql-bootstrap/0.log"
Dec 13 07:31:51 crc kubenswrapper[5048]: I1213 07:31:51.689133 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_a5870a17-c845-46b8-a25c-8f8822a93cb8/mysql-bootstrap/0.log"
Dec 13 07:31:51 crc kubenswrapper[5048]: I1213 07:31:51.742173 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_a5870a17-c845-46b8-a25c-8f8822a93cb8/galera/0.log"
Dec 13 07:31:51 crc kubenswrapper[5048]: I1213 07:31:51.894764 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_c0fd6a3a-5c9b-4d74-bfba-719758182b08/openstackclient/0.log"
Dec 13 07:31:51 crc kubenswrapper[5048]: I1213 07:31:51.929364 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_b9502006-ca4a-4a71-90ef-4f86311c70fc/nova-metadata-metadata/0.log"
Dec 13 07:31:52 crc kubenswrapper[5048]: I1213 07:31:52.015210 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-2sh28_a8258a39-dbbb-4672-9d88-22749f0c9563/ovn-controller/0.log"
Dec 13 07:31:52 crc kubenswrapper[5048]: I1213 07:31:52.147681 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-l4wtt_0da0172c-5a76-44fc-8ff1-e694ba1e083b/openstack-network-exporter/0.log"
Dec 13 07:31:52 crc kubenswrapper[5048]: I1213 07:31:52.464718 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-x5dfv_df1e9604-d8b9-4e08-8eb1-9c30b73f6d70/ovsdb-server-init/0.log"
Dec 13 07:31:52 crc kubenswrapper[5048]: I1213 07:31:52.646964 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-x5dfv_df1e9604-d8b9-4e08-8eb1-9c30b73f6d70/ovsdb-server-init/0.log"
Dec 13 07:31:52 crc kubenswrapper[5048]: I1213 07:31:52.664545 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-x5dfv_df1e9604-d8b9-4e08-8eb1-9c30b73f6d70/ovsdb-server/0.log"
Dec 13 07:31:52 crc kubenswrapper[5048]: I1213 07:31:52.670723 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-x5dfv_df1e9604-d8b9-4e08-8eb1-9c30b73f6d70/ovs-vswitchd/0.log"
Dec 13 07:31:52 crc kubenswrapper[5048]: I1213 07:31:52.893997 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-s7hbt_c0376236-384b-44b9-abbb-a1fe41557a88/ovn-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 13 07:31:52 crc kubenswrapper[5048]: I1213 07:31:52.950567 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_3f002b19-41eb-4af1-a6c1-a4639e81417e/ovn-northd/0.log"
Dec 13 07:31:52 crc kubenswrapper[5048]: I1213 07:31:52.984335 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_3f002b19-41eb-4af1-a6c1-a4639e81417e/openstack-network-exporter/0.log"
Dec 13 07:31:53 crc kubenswrapper[5048]: I1213 07:31:53.153358 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_8c7d8f14-b731-4408-b506-2dac81b2a0a7/openstack-network-exporter/0.log"
Dec 13 07:31:53 crc kubenswrapper[5048]: I1213 07:31:53.175538 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_8c7d8f14-b731-4408-b506-2dac81b2a0a7/ovsdbserver-nb/0.log"
Dec 13 07:31:53 crc kubenswrapper[5048]: I1213 07:31:53.341724 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_932081d6-9a70-45ec-8738-f0b1265a2a84/openstack-network-exporter/0.log"
Dec 13 07:31:53 crc kubenswrapper[5048]: I1213 07:31:53.390176 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_932081d6-9a70-45ec-8738-f0b1265a2a84/ovsdbserver-sb/0.log"
Dec 13 07:31:53 crc kubenswrapper[5048]: I1213 07:31:53.636230 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-7b9cc68dcb-22pkt_fb3e7c2a-93fb-4bf8-8447-6d4be22a0760/placement-api/0.log"
Dec 13 07:31:53 crc kubenswrapper[5048]: I1213 07:31:53.683061 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-7b9cc68dcb-22pkt_fb3e7c2a-93fb-4bf8-8447-6d4be22a0760/placement-log/0.log"
Dec 13 07:31:53 crc kubenswrapper[5048]: I1213 07:31:53.697306 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_b6586a43-004c-41f4-9172-b3b385849341/setup-container/0.log"
Dec 13 07:31:53 crc kubenswrapper[5048]: I1213 07:31:53.924272 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_b6586a43-004c-41f4-9172-b3b385849341/rabbitmq/0.log"
Dec 13 07:31:53 crc kubenswrapper[5048]: I1213 07:31:53.924692 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_cd2c9077-4969-4d54-a677-2f84128c1a13/setup-container/0.log"
Dec 13 07:31:54 crc kubenswrapper[5048]: I1213 07:31:54.016401 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_b6586a43-004c-41f4-9172-b3b385849341/setup-container/0.log"
Dec 13 07:31:54 crc kubenswrapper[5048]: I1213 07:31:54.199467 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_cd2c9077-4969-4d54-a677-2f84128c1a13/rabbitmq/0.log"
Dec 13 07:31:54 crc kubenswrapper[5048]: I1213 07:31:54.226297 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_cd2c9077-4969-4d54-a677-2f84128c1a13/setup-container/0.log"
Dec 13 07:31:54 crc kubenswrapper[5048]: I1213 07:31:54.309337 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-mpgnb_360c2d75-bc2a-408a-bfa8-4c250e32d6ab/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 13 07:31:54 crc kubenswrapper[5048]: I1213 07:31:54.427213 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-rrx8r_db18a520-b418-4ee9-bed9-d72c023c9959/redhat-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 13 07:31:54 crc kubenswrapper[5048]: I1213 07:31:54.567596 5048 scope.go:117] "RemoveContainer" containerID="816d7f6445997a7cd4a07b0fe58ad5d757b1c38f7ec8f2c73ef48fbeaa2dc8c7"
Dec 13 07:31:54 crc kubenswrapper[5048]: E1213 07:31:54.568334 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b"
Dec 13 07:31:54 crc kubenswrapper[5048]: I1213 07:31:54.570098 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c_29c60e6c-e671-43eb-ad63-2ccf40ef5719/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 13 07:31:54 crc kubenswrapper[5048]: I1213 07:31:54.698719 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-42g5n_bc147090-461f-4896-a08e-59dddc7c14cc/run-os-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 13 07:31:54 crc kubenswrapper[5048]: I1213 07:31:54.836378 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-mwrz4_a097705b-8b45-470f-8026-744ebdc4083a/ssh-known-hosts-edpm-deployment/0.log"
Dec 13 07:31:54 crc kubenswrapper[5048]: I1213 07:31:54.972002 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-6c7b8f495-lq789_9d13568e-517b-46ae-b3bd-dfa6ee7b671a/proxy-server/0.log"
Dec 13 07:31:55 crc kubenswrapper[5048]: I1213 07:31:55.054814 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-6c7b8f495-lq789_9d13568e-517b-46ae-b3bd-dfa6ee7b671a/proxy-httpd/0.log"
Dec 13 07:31:55 crc kubenswrapper[5048]: I1213 07:31:55.129875 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-xs94r_76f71d51-d887-428e-bcf0-e07a75cda134/swift-ring-rebalance/0.log"
Dec 13 07:31:55 crc kubenswrapper[5048]: I1213 07:31:55.304994 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4/account-auditor/0.log"
Dec 13 07:31:55 crc kubenswrapper[5048]: I1213 07:31:55.329456 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4/account-reaper/0.log"
Dec 13 07:31:55 crc kubenswrapper[5048]: I1213 07:31:55.348771 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4/account-replicator/0.log"
Dec 13 07:31:55 crc kubenswrapper[5048]: I1213 07:31:55.486780 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4/container-auditor/0.log"
Dec 13 07:31:55 crc kubenswrapper[5048]: I1213 07:31:55.490117 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4/account-server/0.log"
Dec 13 07:31:55 crc kubenswrapper[5048]: I1213 07:31:55.540274 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4/container-replicator/0.log"
Dec 13 07:31:55 crc kubenswrapper[5048]: I1213 07:31:55.680345 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4/container-server/0.log"
Dec 13 07:31:55 crc kubenswrapper[5048]: I1213 07:31:55.844048 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4/container-updater/0.log"
Dec 13 07:31:55 crc kubenswrapper[5048]: I1213 07:31:55.868873 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4/object-auditor/0.log"
Dec 13 07:31:55 crc kubenswrapper[5048]: I1213 07:31:55.899146 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4/object-expirer/0.log"
Dec 13 07:31:55 crc kubenswrapper[5048]: I1213 07:31:55.939075 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4/object-replicator/0.log"
Dec 13 07:31:56 crc kubenswrapper[5048]: I1213 07:31:56.052729 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4/object-updater/0.log"
Dec 13 07:31:56 crc kubenswrapper[5048]: I1213 07:31:56.069420 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4/rsync/0.log"
Dec 13 07:31:56 crc kubenswrapper[5048]: I1213 07:31:56.111556 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4/object-server/0.log"
Dec 13 07:31:56 crc kubenswrapper[5048]: I1213 07:31:56.166363 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4/swift-recon-cron/0.log"
Dec 13 07:31:56 crc kubenswrapper[5048]: I1213 07:31:56.304363 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf_13b74976-c0e2-4461-a564-de6ce88aa549/telemetry-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 13 07:31:56 crc kubenswrapper[5048]: I1213 07:31:56.425145 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_b1419e46-ce93-452b-9d88-b9a50e9dbfe6/tempest-tests-tempest-tests-runner/0.log"
Dec 13 07:31:56 crc kubenswrapper[5048]: I1213 07:31:56.499696 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_036a76e2-0363-4fe5-98fc-283eea6536e6/test-operator-logs-container/0.log"
Dec 13 07:31:56 crc kubenswrapper[5048]: I1213 07:31:56.692287 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-pw7g5_6a5d7f38-72b9-4092-a948-c775ce64d40c/validate-network-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 13 07:32:04 crc kubenswrapper[5048]: I1213 07:32:04.367642 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_c468ccc6-8384-4a69-ae41-cca18f9233e3/memcached/0.log"
Dec 13 07:32:06 crc kubenswrapper[5048]: I1213 07:32:06.573236 5048 scope.go:117] "RemoveContainer" containerID="816d7f6445997a7cd4a07b0fe58ad5d757b1c38f7ec8f2c73ef48fbeaa2dc8c7"
Dec 13 07:32:06 crc kubenswrapper[5048]: E1213 07:32:06.573881 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b"
Dec 13 07:32:17 crc kubenswrapper[5048]: I1213 07:32:17.567713 5048 scope.go:117] "RemoveContainer" containerID="816d7f6445997a7cd4a07b0fe58ad5d757b1c38f7ec8f2c73ef48fbeaa2dc8c7"
Dec 13 07:32:17 crc kubenswrapper[5048]: E1213 07:32:17.568575 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b"
Dec 13 07:32:21 crc kubenswrapper[5048]: I1213 07:32:21.882739 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc_35d3bce1-ed03-4919-b1eb-09b5a630a578/util/0.log"
Dec 13 07:32:22 crc kubenswrapper[5048]: I1213 07:32:22.064633 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc_35d3bce1-ed03-4919-b1eb-09b5a630a578/util/0.log"
Dec 13 07:32:22 crc kubenswrapper[5048]: I1213 07:32:22.068385 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc_35d3bce1-ed03-4919-b1eb-09b5a630a578/pull/0.log"
Dec 13 07:32:22 crc kubenswrapper[5048]: I1213 07:32:22.085338 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc_35d3bce1-ed03-4919-b1eb-09b5a630a578/pull/0.log"
Dec 13 07:32:22 crc kubenswrapper[5048]: I1213 07:32:22.257378 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc_35d3bce1-ed03-4919-b1eb-09b5a630a578/extract/0.log"
Dec 13 07:32:22 crc kubenswrapper[5048]: I1213 07:32:22.257825 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc_35d3bce1-ed03-4919-b1eb-09b5a630a578/util/0.log"
Dec 13 07:32:22 crc kubenswrapper[5048]: I1213 07:32:22.258877 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc_35d3bce1-ed03-4919-b1eb-09b5a630a578/pull/0.log"
Dec 13 07:32:22 crc kubenswrapper[5048]: I1213 07:32:22.831364 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-95949466-xmb6d_50877cb8-07a0-48c4-af3d-72144fa836e0/manager/0.log"
Dec 13 07:32:22 crc kubenswrapper[5048]: I1213 07:32:22.835209 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-5cf45c46bd-mblxt_6d515ed0-b2e1-469e-a7c5-bbe62664979e/manager/0.log"
Dec 13 07:32:23 crc kubenswrapper[5048]: I1213 07:32:23.004902 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-66f8b87655-ll9d4_83f0165e-ed2b-436c-9ae0-e871bd291638/manager/0.log"
Dec 13 07:32:23 crc kubenswrapper[5048]: I1213 07:32:23.087778 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-767f9d7567-md4jk_4d953dd1-d113-4dc6-a80b-3ded9d08b476/manager/0.log"
Dec 13 07:32:23 crc kubenswrapper[5048]: I1213 07:32:23.246206 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-59b8dcb766-sn85z_07f258ba-5ff1-4d34-8e07-62c024c15dba/manager/0.log"
Dec 13 07:32:23 crc kubenswrapper[5048]: I1213 07:32:23.300980 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-6ccf486b9-g6ggd_9bd1a72e-ad49-46ae-a748-a21d05114b84/manager/0.log"
Dec 13 07:32:23 crc kubenswrapper[5048]: I1213 07:32:23.492826 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-f458558d7-xxj48_b47e0c4b-dd06-4d55-b3d7-8e3d968df8e6/manager/0.log"
Dec 13 07:32:23 crc kubenswrapper[5048]: I1213 07:32:23.619045 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-7cf9bd88b6-wswvl_d1a74609-2a57-44a5-8e88-dab8ae7fba98/manager/0.log"
Dec 13 07:32:23 crc kubenswrapper[5048]: I1213 07:32:23.743001 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-5c7cbf548f-fdf2l_148d1633-90ad-45da-af0b-5b182ee41795/manager/0.log"
Dec 13 07:32:23 crc kubenswrapper[5048]: I1213 07:32:23.791769 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5fdd9786f7-mkjnl_c58196eb-9b95-450a-98ff-d852ff7125c5/manager/0.log"
Dec 13 07:32:24 crc kubenswrapper[5048]: I1213 07:32:24.090916 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-f76f4954c-bpgwj_1994b961-1801-4279-8c61-a901803b4a3a/manager/0.log"
Dec 13 07:32:24 crc kubenswrapper[5048]: I1213 07:32:24.113887 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-7cd87b778f-zgrtn_72736371-ff33-45e1-a685-9e2f89dcec60/manager/0.log"
Dec 13 07:32:24 crc kubenswrapper[5048]: I1213 07:32:24.317404 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-68c649d9d-gqsj6_659a6b7b-7ef9-4fc2-8ea4-4298020aa94c/manager/0.log"
Dec 13 07:32:24 crc kubenswrapper[5048]: I1213 07:32:24.365109 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-5fbbf8b6cc-fklqp_201cc161-1f13-498b-b99a-0d9c91bdc15a/manager/0.log"
Dec 13 07:32:24 crc kubenswrapper[5048]: I1213 07:32:24.504574 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878_db14d0ba-f68b-48a5-b69a-97399548fca1/manager/0.log"
Dec 13 07:32:24 crc kubenswrapper[5048]: I1213 07:32:24.902699 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-qlml5_60f70325-e4e1-4fdc-ba21-c92b6ed5967e/registry-server/0.log"
Dec 13 07:32:24 crc kubenswrapper[5048]: I1213 07:32:24.909464 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-576c4d554c-lf2gp_46a5447f-e917-4c59-8735-db6e8dce1527/operator/0.log"
Dec 13 07:32:25 crc kubenswrapper[5048]: I1213 07:32:25.182890 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-bf6d4f946-4ck8m_859bbc21-6f39-4712-a04f-4473b78b32eb/manager/0.log"
Dec 13 07:32:25 crc kubenswrapper[5048]: I1213 07:32:25.358554 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-8665b56d78-g782d_6d4d57f0-ff75-455d-8e8a-fe4947b3ee40/manager/0.log"
Dec 13 07:32:25 crc kubenswrapper[5048]: I1213 07:32:25.433103 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-fvxqc_4b1fbe4c-27c1-4456-bf01-6a42320cb63d/operator/0.log"
Dec 13 07:32:25 crc kubenswrapper[5048]: I1213 07:32:25.624295 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-5c6df8f9-sx7sf_813f57d5-1063-4dbc-9847-b6ea97e46fbe/manager/0.log"
Dec 13 07:32:25 crc kubenswrapper[5048]: I1213 07:32:25.737670 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-97d456b9-2bg97_89f4ffab-8c61-4389-8f41-43cd8e2d54de/manager/0.log"
Dec 13 07:32:25 crc kubenswrapper[5048]: I1213 07:32:25.931656 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-756ccf86c7-dv9ww_a046de13-f7f7-4d7c-abf3-79ed8cc60fad/manager/0.log"
Dec 13 07:32:25 crc kubenswrapper[5048]: I1213 07:32:25.953048 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-58d7cfb75d-nsmnm_fea58380-4304-485b-aefc-48f9baea4126/manager/0.log"
Dec 13 07:32:26 crc kubenswrapper[5048]: I1213 07:32:26.020689 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-55f78b7c4c-b4f6l_7165a29e-88bf-4194-bffa-414a675d1be5/manager/0.log"
Dec 13 07:32:29 crc kubenswrapper[5048]: I1213 07:32:29.639144 5048 scope.go:117] "RemoveContainer" containerID="816d7f6445997a7cd4a07b0fe58ad5d757b1c38f7ec8f2c73ef48fbeaa2dc8c7"
Dec 13 07:32:29 crc kubenswrapper[5048]: E1213 07:32:29.639923 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b"
Dec 13 07:32:42 crc kubenswrapper[5048]: I1213 07:32:42.567458 5048 scope.go:117] "RemoveContainer" containerID="816d7f6445997a7cd4a07b0fe58ad5d757b1c38f7ec8f2c73ef48fbeaa2dc8c7"
Dec 13 07:32:42 crc kubenswrapper[5048]: E1213 07:32:42.573638 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b"
Dec 13 07:32:48 crc kubenswrapper[5048]: I1213 07:32:48.540348 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-l5kdh_cdcbb504-e2d6-4511-bf24-d18ba641f45b/control-plane-machine-set-operator/0.log"
Dec 13 07:32:48 crc kubenswrapper[5048]: I1213 07:32:48.710860 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-x5sc8_81dcc1e9-b7ab-4d9e-8a4a-6bdc087877a4/kube-rbac-proxy/0.log"
Dec 13 07:32:48 crc kubenswrapper[5048]: I1213 07:32:48.770534 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-x5sc8_81dcc1e9-b7ab-4d9e-8a4a-6bdc087877a4/machine-api-operator/0.log"
Dec 13 07:32:54 crc kubenswrapper[5048]: I1213 07:32:54.567192 5048 scope.go:117] "RemoveContainer" containerID="816d7f6445997a7cd4a07b0fe58ad5d757b1c38f7ec8f2c73ef48fbeaa2dc8c7"
Dec 13 07:32:54 crc kubenswrapper[5048]: E1213 07:32:54.568511 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b"
Dec 13 07:33:01 crc kubenswrapper[5048]: I1213 07:33:01.436521 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-tb8lp_fe89c6cd-2d34-47e7-9ff7-cba95295e680/cert-manager-controller/0.log"
Dec 13 07:33:01 crc kubenswrapper[5048]: I1213 07:33:01.594920 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-v5shf_6a17b3c4-050d-40b0-82d8-d9208365e261/cert-manager-cainjector/0.log"
Dec 13 07:33:01 crc kubenswrapper[5048]: I1213 07:33:01.629687 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-m5zdt_2a9b7a7a-7741-4f30-8e7f-a9475784f796/cert-manager-webhook/0.log"
Dec 13 07:33:05 crc kubenswrapper[5048]: I1213 07:33:05.061338 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-zqbj8"]
Dec 13 07:33:05 crc kubenswrapper[5048]: E1213 07:33:05.062878 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad02faeb-a1ee-45a4-924c-83c1f17df2f4" containerName="container-00"
Dec 13 07:33:05 crc kubenswrapper[5048]: I1213 07:33:05.062906 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad02faeb-a1ee-45a4-924c-83c1f17df2f4" containerName="container-00"
Dec 13 07:33:05 crc kubenswrapper[5048]: I1213 07:33:05.063230 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad02faeb-a1ee-45a4-924c-83c1f17df2f4" containerName="container-00"
Dec 13 07:33:05 crc kubenswrapper[5048]: I1213 07:33:05.066074 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zqbj8"
Dec 13 07:33:05 crc kubenswrapper[5048]: I1213 07:33:05.079462 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e4a725d5-69c3-410e-8add-13fd97239e2b-utilities\") pod \"certified-operators-zqbj8\" (UID: \"e4a725d5-69c3-410e-8add-13fd97239e2b\") " pod="openshift-marketplace/certified-operators-zqbj8"
Dec 13 07:33:05 crc kubenswrapper[5048]: I1213 07:33:05.079654 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e4a725d5-69c3-410e-8add-13fd97239e2b-catalog-content\") pod \"certified-operators-zqbj8\" (UID: \"e4a725d5-69c3-410e-8add-13fd97239e2b\") " pod="openshift-marketplace/certified-operators-zqbj8"
Dec 13 07:33:05 crc kubenswrapper[5048]: I1213 07:33:05.079751 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j48pb\" (UniqueName: \"kubernetes.io/projected/e4a725d5-69c3-410e-8add-13fd97239e2b-kube-api-access-j48pb\") pod \"certified-operators-zqbj8\" (UID: \"e4a725d5-69c3-410e-8add-13fd97239e2b\") " pod="openshift-marketplace/certified-operators-zqbj8"
Dec 13 07:33:05 crc kubenswrapper[5048]: I1213 07:33:05.081375 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zqbj8"]
Dec 13 07:33:05 crc kubenswrapper[5048]: I1213 07:33:05.180342 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j48pb\" (UniqueName: \"kubernetes.io/projected/e4a725d5-69c3-410e-8add-13fd97239e2b-kube-api-access-j48pb\") pod \"certified-operators-zqbj8\" (UID: \"e4a725d5-69c3-410e-8add-13fd97239e2b\") " pod="openshift-marketplace/certified-operators-zqbj8"
Dec 13 07:33:05 crc kubenswrapper[5048]: I1213 07:33:05.180449 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e4a725d5-69c3-410e-8add-13fd97239e2b-utilities\") pod \"certified-operators-zqbj8\" (UID: \"e4a725d5-69c3-410e-8add-13fd97239e2b\") " pod="openshift-marketplace/certified-operators-zqbj8"
Dec 13 07:33:05 crc kubenswrapper[5048]: I1213 07:33:05.180524 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e4a725d5-69c3-410e-8add-13fd97239e2b-catalog-content\") pod \"certified-operators-zqbj8\" (UID: \"e4a725d5-69c3-410e-8add-13fd97239e2b\") " pod="openshift-marketplace/certified-operators-zqbj8"
Dec 13 07:33:05 crc kubenswrapper[5048]: I1213 07:33:05.180964 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e4a725d5-69c3-410e-8add-13fd97239e2b-catalog-content\") pod \"certified-operators-zqbj8\" (UID: \"e4a725d5-69c3-410e-8add-13fd97239e2b\") " pod="openshift-marketplace/certified-operators-zqbj8"
Dec 13 07:33:05 crc kubenswrapper[5048]: I1213 07:33:05.181314 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e4a725d5-69c3-410e-8add-13fd97239e2b-utilities\") pod \"certified-operators-zqbj8\" (UID: \"e4a725d5-69c3-410e-8add-13fd97239e2b\") " pod="openshift-marketplace/certified-operators-zqbj8"
Dec 13 07:33:05 crc kubenswrapper[5048]: I1213 07:33:05.209329 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j48pb\" (UniqueName: \"kubernetes.io/projected/e4a725d5-69c3-410e-8add-13fd97239e2b-kube-api-access-j48pb\") pod \"certified-operators-zqbj8\" (UID: \"e4a725d5-69c3-410e-8add-13fd97239e2b\") " pod="openshift-marketplace/certified-operators-zqbj8"
Dec 13 07:33:05 crc kubenswrapper[5048]: I1213 07:33:05.429524 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zqbj8"
Dec 13 07:33:06 crc kubenswrapper[5048]: I1213 07:33:06.029771 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zqbj8"]
Dec 13 07:33:06 crc kubenswrapper[5048]: I1213 07:33:06.415074 5048 generic.go:334] "Generic (PLEG): container finished" podID="e4a725d5-69c3-410e-8add-13fd97239e2b" containerID="79e8f287c39b308c19b69dcab8a4c663ff04c7bad69c77ed1c49bee83435bb76" exitCode=0
Dec 13 07:33:06 crc kubenswrapper[5048]: I1213 07:33:06.415714 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zqbj8" event={"ID":"e4a725d5-69c3-410e-8add-13fd97239e2b","Type":"ContainerDied","Data":"79e8f287c39b308c19b69dcab8a4c663ff04c7bad69c77ed1c49bee83435bb76"}
Dec 13 07:33:06 crc kubenswrapper[5048]: I1213 07:33:06.418594 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zqbj8" event={"ID":"e4a725d5-69c3-410e-8add-13fd97239e2b","Type":"ContainerStarted","Data":"03ec3d0fe9c64985b84f18b02e16518f3abec51513e39242ddc8223376a28534"}
Dec 13 07:33:06 crc kubenswrapper[5048]: I1213 07:33:06.418034 5048 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Dec 13 07:33:08 crc kubenswrapper[5048]: I1213 07:33:08.567407 5048 scope.go:117] "RemoveContainer" containerID="816d7f6445997a7cd4a07b0fe58ad5d757b1c38f7ec8f2c73ef48fbeaa2dc8c7"
Dec 13 07:33:08 crc kubenswrapper[5048]: E1213 07:33:08.568207 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b"
Dec 13 07:33:09 crc kubenswrapper[5048]: I1213 07:33:09.451478 5048 generic.go:334] "Generic (PLEG): container finished" podID="e4a725d5-69c3-410e-8add-13fd97239e2b" containerID="786922ce8b13d87f539152e7c8c1a6d9b52411133dafcb5ee7fc9904bb0480cb" exitCode=0
Dec 13 07:33:09 crc kubenswrapper[5048]: I1213 07:33:09.451565 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zqbj8" event={"ID":"e4a725d5-69c3-410e-8add-13fd97239e2b","Type":"ContainerDied","Data":"786922ce8b13d87f539152e7c8c1a6d9b52411133dafcb5ee7fc9904bb0480cb"}
Dec 13 07:33:10 crc kubenswrapper[5048]: I1213 07:33:10.477827 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zqbj8" event={"ID":"e4a725d5-69c3-410e-8add-13fd97239e2b","Type":"ContainerStarted","Data":"62b0c046fa358aed3a91fb45b3e953dc58fa5dc8160e90f8920258c8543f6edb"}
Dec 13 07:33:14 crc kubenswrapper[5048]: I1213 07:33:14.677506 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-6ff7998486-ppsbf_85ef8f1d-8549-4a84-a38c-aefa9e4e5583/nmstate-console-plugin/0.log"
Dec 13 07:33:15 crc kubenswrapper[5048]: I1213 07:33:15.008553 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-b4xzd_d4103fb8-a0f4-4b91-b08a-6047a1f4df6e/nmstate-handler/0.log"
Dec 13 07:33:15 crc kubenswrapper[5048]: I1213 07:33:15.065275 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f7f7578db-wvmhg_4fe5b93a-b962-4feb-85cf-1210c428f7f6/kube-rbac-proxy/0.log"
Dec 13 07:33:15 crc kubenswrapper[5048]: I1213 07:33:15.068173 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f7f7578db-wvmhg_4fe5b93a-b962-4feb-85cf-1210c428f7f6/nmstate-metrics/0.log"
Dec 13 07:33:15 crc kubenswrapper[5048]: I1213 07:33:15.202703 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-6769fb99d-lk9l5_ce50f1e5-0177-4f3d-a02a-5af653f70001/nmstate-operator/0.log"
Dec 13 07:33:15 crc kubenswrapper[5048]: I1213 07:33:15.289401 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-f8fb84555-bswmw_a6abecb0-a854-4dbb-9fb0-5ba03e64daae/nmstate-webhook/0.log"
Dec 13 07:33:15 crc kubenswrapper[5048]: I1213 07:33:15.430071 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-zqbj8"
Dec 13 07:33:15 crc kubenswrapper[5048]: I1213 07:33:15.431463 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-zqbj8"
Dec 13 07:33:15 crc kubenswrapper[5048]: I1213 07:33:15.476847 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-zqbj8"
Dec 13 07:33:15 crc kubenswrapper[5048]: I1213 07:33:15.510753 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-zqbj8" podStartSLOduration=7.058087228 podStartE2EDuration="10.510729541s" podCreationTimestamp="2025-12-13 07:33:05 +0000 UTC" firstStartedPulling="2025-12-13 07:33:06.41772024 +0000 UTC m=+3820.284314831" lastFinishedPulling="2025-12-13 07:33:09.870362553 +0000 UTC m=+3823.736957144" observedRunningTime="2025-12-13 07:33:10.496950599 +0000 UTC m=+3824.363545190" watchObservedRunningTime="2025-12-13 07:33:15.510729541 +0000 UTC m=+3829.377324122"
Dec 13 07:33:15 crc kubenswrapper[5048]: I1213 07:33:15.564911 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-zqbj8"
Dec 13 07:33:15 crc kubenswrapper[5048]: I1213 07:33:15.712087 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zqbj8"]
Dec 13 07:33:17 crc kubenswrapper[5048]: I1213 07:33:17.540397 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-zqbj8" podUID="e4a725d5-69c3-410e-8add-13fd97239e2b" containerName="registry-server" containerID="cri-o://62b0c046fa358aed3a91fb45b3e953dc58fa5dc8160e90f8920258c8543f6edb" gracePeriod=2
Dec 13 07:33:18 crc kubenswrapper[5048]: I1213 07:33:18.552294 5048 generic.go:334] "Generic (PLEG): container finished" podID="e4a725d5-69c3-410e-8add-13fd97239e2b" containerID="62b0c046fa358aed3a91fb45b3e953dc58fa5dc8160e90f8920258c8543f6edb" exitCode=0
Dec 13 07:33:18 crc kubenswrapper[5048]: I1213 07:33:18.552376 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zqbj8" event={"ID":"e4a725d5-69c3-410e-8add-13fd97239e2b","Type":"ContainerDied","Data":"62b0c046fa358aed3a91fb45b3e953dc58fa5dc8160e90f8920258c8543f6edb"}
Dec 13 07:33:19 crc kubenswrapper[5048]: I1213 07:33:19.138495 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zqbj8"
Dec 13 07:33:19 crc kubenswrapper[5048]: I1213 07:33:19.191451 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e4a725d5-69c3-410e-8add-13fd97239e2b-utilities\") pod \"e4a725d5-69c3-410e-8add-13fd97239e2b\" (UID: \"e4a725d5-69c3-410e-8add-13fd97239e2b\") "
Dec 13 07:33:19 crc kubenswrapper[5048]: I1213 07:33:19.191509 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e4a725d5-69c3-410e-8add-13fd97239e2b-catalog-content\") pod \"e4a725d5-69c3-410e-8add-13fd97239e2b\" (UID: \"e4a725d5-69c3-410e-8add-13fd97239e2b\") "
Dec 13 07:33:19 crc kubenswrapper[5048]: I1213 07:33:19.191684 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j48pb\" (UniqueName: \"kubernetes.io/projected/e4a725d5-69c3-410e-8add-13fd97239e2b-kube-api-access-j48pb\") pod \"e4a725d5-69c3-410e-8add-13fd97239e2b\" (UID: \"e4a725d5-69c3-410e-8add-13fd97239e2b\") "
Dec 13 07:33:19 crc kubenswrapper[5048]: I1213 07:33:19.192604 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e4a725d5-69c3-410e-8add-13fd97239e2b-utilities" (OuterVolumeSpecName: "utilities") pod "e4a725d5-69c3-410e-8add-13fd97239e2b" (UID: "e4a725d5-69c3-410e-8add-13fd97239e2b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 13 07:33:19 crc kubenswrapper[5048]: I1213 07:33:19.198813 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e4a725d5-69c3-410e-8add-13fd97239e2b-kube-api-access-j48pb" (OuterVolumeSpecName: "kube-api-access-j48pb") pod "e4a725d5-69c3-410e-8add-13fd97239e2b" (UID: "e4a725d5-69c3-410e-8add-13fd97239e2b"). InnerVolumeSpecName "kube-api-access-j48pb".
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 07:33:19 crc kubenswrapper[5048]: I1213 07:33:19.241461 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e4a725d5-69c3-410e-8add-13fd97239e2b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e4a725d5-69c3-410e-8add-13fd97239e2b" (UID: "e4a725d5-69c3-410e-8add-13fd97239e2b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 07:33:19 crc kubenswrapper[5048]: I1213 07:33:19.295638 5048 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e4a725d5-69c3-410e-8add-13fd97239e2b-utilities\") on node \"crc\" DevicePath \"\"" Dec 13 07:33:19 crc kubenswrapper[5048]: I1213 07:33:19.295685 5048 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e4a725d5-69c3-410e-8add-13fd97239e2b-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 13 07:33:19 crc kubenswrapper[5048]: I1213 07:33:19.295700 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j48pb\" (UniqueName: \"kubernetes.io/projected/e4a725d5-69c3-410e-8add-13fd97239e2b-kube-api-access-j48pb\") on node \"crc\" DevicePath \"\"" Dec 13 07:33:19 crc kubenswrapper[5048]: I1213 07:33:19.562799 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zqbj8" event={"ID":"e4a725d5-69c3-410e-8add-13fd97239e2b","Type":"ContainerDied","Data":"03ec3d0fe9c64985b84f18b02e16518f3abec51513e39242ddc8223376a28534"} Dec 13 07:33:19 crc kubenswrapper[5048]: I1213 07:33:19.562853 5048 scope.go:117] "RemoveContainer" containerID="62b0c046fa358aed3a91fb45b3e953dc58fa5dc8160e90f8920258c8543f6edb" Dec 13 07:33:19 crc kubenswrapper[5048]: I1213 07:33:19.562866 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zqbj8" Dec 13 07:33:19 crc kubenswrapper[5048]: I1213 07:33:19.587972 5048 scope.go:117] "RemoveContainer" containerID="786922ce8b13d87f539152e7c8c1a6d9b52411133dafcb5ee7fc9904bb0480cb" Dec 13 07:33:19 crc kubenswrapper[5048]: I1213 07:33:19.615246 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zqbj8"] Dec 13 07:33:19 crc kubenswrapper[5048]: I1213 07:33:19.621244 5048 scope.go:117] "RemoveContainer" containerID="79e8f287c39b308c19b69dcab8a4c663ff04c7bad69c77ed1c49bee83435bb76" Dec 13 07:33:19 crc kubenswrapper[5048]: I1213 07:33:19.628752 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-zqbj8"] Dec 13 07:33:20 crc kubenswrapper[5048]: I1213 07:33:20.595190 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e4a725d5-69c3-410e-8add-13fd97239e2b" path="/var/lib/kubelet/pods/e4a725d5-69c3-410e-8add-13fd97239e2b/volumes" Dec 13 07:33:21 crc kubenswrapper[5048]: I1213 07:33:21.567299 5048 scope.go:117] "RemoveContainer" containerID="816d7f6445997a7cd4a07b0fe58ad5d757b1c38f7ec8f2c73ef48fbeaa2dc8c7" Dec 13 07:33:21 crc kubenswrapper[5048]: E1213 07:33:21.567872 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:33:30 crc kubenswrapper[5048]: I1213 07:33:30.123731 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-5bddd4b946-x8wnx_2adbe773-2087-440f-bcdd-91ccb9eaa03f/kube-rbac-proxy/0.log" Dec 13 07:33:30 crc kubenswrapper[5048]: I1213 07:33:30.186626 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-5bddd4b946-x8wnx_2adbe773-2087-440f-bcdd-91ccb9eaa03f/controller/0.log" Dec 13 07:33:30 crc kubenswrapper[5048]: I1213 07:33:30.275525 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rm42c_b6da7b5a-20a7-4721-b245-c63202188d2f/cp-frr-files/0.log" Dec 13 07:33:30 crc kubenswrapper[5048]: I1213 07:33:30.442504 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rm42c_b6da7b5a-20a7-4721-b245-c63202188d2f/cp-frr-files/0.log" Dec 13 07:33:30 crc kubenswrapper[5048]: I1213 07:33:30.487977 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rm42c_b6da7b5a-20a7-4721-b245-c63202188d2f/cp-reloader/0.log" Dec 13 07:33:30 crc kubenswrapper[5048]: I1213 07:33:30.500812 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rm42c_b6da7b5a-20a7-4721-b245-c63202188d2f/cp-metrics/0.log" Dec 13 07:33:30 crc kubenswrapper[5048]: I1213 07:33:30.507356 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rm42c_b6da7b5a-20a7-4721-b245-c63202188d2f/cp-reloader/0.log" Dec 13 07:33:30 crc kubenswrapper[5048]: I1213 07:33:30.662940 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rm42c_b6da7b5a-20a7-4721-b245-c63202188d2f/cp-metrics/0.log" Dec 13 07:33:30 crc kubenswrapper[5048]: I1213 07:33:30.691349 5048 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-rm42c_b6da7b5a-20a7-4721-b245-c63202188d2f/cp-frr-files/0.log" Dec 13 07:33:30 crc kubenswrapper[5048]: I1213 07:33:30.723139 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rm42c_b6da7b5a-20a7-4721-b245-c63202188d2f/cp-metrics/0.log" Dec 13 07:33:30 crc kubenswrapper[5048]: I1213 07:33:30.760773 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rm42c_b6da7b5a-20a7-4721-b245-c63202188d2f/cp-reloader/0.log" Dec 13 07:33:30 crc kubenswrapper[5048]: I1213 07:33:30.937734 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rm42c_b6da7b5a-20a7-4721-b245-c63202188d2f/cp-reloader/0.log" Dec 13 07:33:30 crc kubenswrapper[5048]: I1213 07:33:30.984616 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rm42c_b6da7b5a-20a7-4721-b245-c63202188d2f/cp-frr-files/0.log" Dec 13 07:33:30 crc kubenswrapper[5048]: I1213 07:33:30.991022 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rm42c_b6da7b5a-20a7-4721-b245-c63202188d2f/controller/0.log" Dec 13 07:33:31 crc kubenswrapper[5048]: I1213 07:33:31.009336 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rm42c_b6da7b5a-20a7-4721-b245-c63202188d2f/cp-metrics/0.log" Dec 13 07:33:31 crc kubenswrapper[5048]: I1213 07:33:31.167895 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rm42c_b6da7b5a-20a7-4721-b245-c63202188d2f/frr-metrics/0.log" Dec 13 07:33:31 crc kubenswrapper[5048]: I1213 07:33:31.202321 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rm42c_b6da7b5a-20a7-4721-b245-c63202188d2f/kube-rbac-proxy/0.log" Dec 13 07:33:31 crc kubenswrapper[5048]: I1213 07:33:31.229308 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rm42c_b6da7b5a-20a7-4721-b245-c63202188d2f/kube-rbac-proxy-frr/0.log" Dec 13 07:33:31 crc kubenswrapper[5048]: I1213 07:33:31.402098 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rm42c_b6da7b5a-20a7-4721-b245-c63202188d2f/reloader/0.log" Dec 13 07:33:31 crc kubenswrapper[5048]: I1213 07:33:31.421591 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7784b6fcf-cwjgt_fb943e2e-85ed-4508-ade6-d16343977d3d/frr-k8s-webhook-server/0.log" Dec 13 07:33:31 crc kubenswrapper[5048]: I1213 07:33:31.784389 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-955d4d9d6-6w2dd_320194d2-61c0-4149-9458-a880711d4edf/manager/0.log" Dec 13 07:33:32 crc kubenswrapper[5048]: I1213 07:33:32.014629 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-56bb4c4c65-p9qls_0618bcc5-697d-4f5b-bbab-a84d868b5d32/webhook-server/0.log" Dec 13 07:33:32 crc kubenswrapper[5048]: I1213 07:33:32.040526 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-fj6dj_3c80ea24-095d-4c79-9328-2b3433da583c/kube-rbac-proxy/0.log" Dec 13 07:33:32 crc kubenswrapper[5048]: I1213 07:33:32.243600 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rm42c_b6da7b5a-20a7-4721-b245-c63202188d2f/frr/0.log" Dec 13 07:33:32 crc kubenswrapper[5048]: I1213 07:33:32.558187 5048 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_speaker-fj6dj_3c80ea24-095d-4c79-9328-2b3433da583c/speaker/0.log" Dec 13 07:33:33 crc kubenswrapper[5048]: I1213 07:33:33.567183 5048 scope.go:117] "RemoveContainer" containerID="816d7f6445997a7cd4a07b0fe58ad5d757b1c38f7ec8f2c73ef48fbeaa2dc8c7" Dec 13 07:33:33 crc kubenswrapper[5048]: E1213 07:33:33.567555 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:33:46 crc kubenswrapper[5048]: I1213 07:33:46.925816 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6_fe828039-1927-4005-a732-ce1cb2fb898c/util/0.log" Dec 13 07:33:46 crc kubenswrapper[5048]: I1213 07:33:46.926285 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6_fe828039-1927-4005-a732-ce1cb2fb898c/util/0.log" Dec 13 07:33:46 crc kubenswrapper[5048]: I1213 07:33:46.937888 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6_fe828039-1927-4005-a732-ce1cb2fb898c/pull/0.log" Dec 13 07:33:46 crc kubenswrapper[5048]: I1213 07:33:46.938259 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6_fe828039-1927-4005-a732-ce1cb2fb898c/pull/0.log" Dec 13 07:33:47 crc kubenswrapper[5048]: I1213 07:33:47.158372 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6_fe828039-1927-4005-a732-ce1cb2fb898c/extract/0.log" Dec 13 07:33:47 crc kubenswrapper[5048]: I1213 07:33:47.222840 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6_fe828039-1927-4005-a732-ce1cb2fb898c/util/0.log" Dec 13 07:33:47 crc kubenswrapper[5048]: I1213 07:33:47.244534 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6_fe828039-1927-4005-a732-ce1cb2fb898c/pull/0.log" Dec 13 07:33:47 crc kubenswrapper[5048]: I1213 07:33:47.515549 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts_4f04fec8-b2a6-4634-b1af-0c47285bad86/util/0.log" Dec 13 07:33:47 crc kubenswrapper[5048]: I1213 07:33:47.566638 5048 scope.go:117] "RemoveContainer" containerID="816d7f6445997a7cd4a07b0fe58ad5d757b1c38f7ec8f2c73ef48fbeaa2dc8c7" Dec 13 07:33:47 crc kubenswrapper[5048]: E1213 07:33:47.566974 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 
07:33:47 crc kubenswrapper[5048]: I1213 07:33:47.656493 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts_4f04fec8-b2a6-4634-b1af-0c47285bad86/util/0.log" Dec 13 07:33:47 crc kubenswrapper[5048]: I1213 07:33:47.678303 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts_4f04fec8-b2a6-4634-b1af-0c47285bad86/pull/0.log" Dec 13 07:33:47 crc kubenswrapper[5048]: I1213 07:33:47.678596 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts_4f04fec8-b2a6-4634-b1af-0c47285bad86/pull/0.log" Dec 13 07:33:47 crc kubenswrapper[5048]: I1213 07:33:47.873665 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts_4f04fec8-b2a6-4634-b1af-0c47285bad86/extract/0.log" Dec 13 07:33:47 crc kubenswrapper[5048]: I1213 07:33:47.874964 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts_4f04fec8-b2a6-4634-b1af-0c47285bad86/pull/0.log" Dec 13 07:33:47 crc kubenswrapper[5048]: I1213 07:33:47.888941 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts_4f04fec8-b2a6-4634-b1af-0c47285bad86/util/0.log" Dec 13 07:33:48 crc kubenswrapper[5048]: I1213 07:33:48.071931 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-g8j5s_07c47b88-4b41-47d9-ae48-feacb3431a47/extract-utilities/0.log" Dec 13 07:33:48 crc kubenswrapper[5048]: I1213 07:33:48.208990 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-g8j5s_07c47b88-4b41-47d9-ae48-feacb3431a47/extract-utilities/0.log" Dec 13 07:33:48 crc kubenswrapper[5048]: I1213 07:33:48.252394 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-g8j5s_07c47b88-4b41-47d9-ae48-feacb3431a47/extract-content/0.log" Dec 13 07:33:48 crc kubenswrapper[5048]: I1213 07:33:48.277881 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-g8j5s_07c47b88-4b41-47d9-ae48-feacb3431a47/extract-content/0.log" Dec 13 07:33:48 crc kubenswrapper[5048]: I1213 07:33:48.446183 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-g8j5s_07c47b88-4b41-47d9-ae48-feacb3431a47/extract-utilities/0.log" Dec 13 07:33:48 crc kubenswrapper[5048]: I1213 07:33:48.455951 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-g8j5s_07c47b88-4b41-47d9-ae48-feacb3431a47/extract-content/0.log" Dec 13 07:33:48 crc kubenswrapper[5048]: I1213 07:33:48.720851 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-ngg4l_2a00d4f4-f561-491b-a236-7e46f411f58e/extract-utilities/0.log" Dec 13 07:33:49 crc kubenswrapper[5048]: I1213 07:33:49.006300 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-ngg4l_2a00d4f4-f561-491b-a236-7e46f411f58e/extract-utilities/0.log" Dec 13 07:33:49 crc kubenswrapper[5048]: I1213 07:33:49.092020 5048 log.go:25] "Finished parsing 
log file" path="/var/log/pods/openshift-marketplace_community-operators-ngg4l_2a00d4f4-f561-491b-a236-7e46f411f58e/extract-content/0.log" Dec 13 07:33:49 crc kubenswrapper[5048]: I1213 07:33:49.099098 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-ngg4l_2a00d4f4-f561-491b-a236-7e46f411f58e/extract-content/0.log" Dec 13 07:33:49 crc kubenswrapper[5048]: I1213 07:33:49.112653 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-g8j5s_07c47b88-4b41-47d9-ae48-feacb3431a47/registry-server/0.log" Dec 13 07:33:49 crc kubenswrapper[5048]: I1213 07:33:49.242642 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-ngg4l_2a00d4f4-f561-491b-a236-7e46f411f58e/extract-utilities/0.log" Dec 13 07:33:49 crc kubenswrapper[5048]: I1213 07:33:49.272202 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-ngg4l_2a00d4f4-f561-491b-a236-7e46f411f58e/extract-content/0.log" Dec 13 07:33:49 crc kubenswrapper[5048]: I1213 07:33:49.520154 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-zm7qx_60f5ae10-2f86-46f8-b613-f017b8753690/marketplace-operator/3.log" Dec 13 07:33:49 crc kubenswrapper[5048]: I1213 07:33:49.532221 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bgnnj_0cdd6ca2-d077-4ea1-8dda-6fadccca087d/extract-utilities/0.log" Dec 13 07:33:49 crc kubenswrapper[5048]: I1213 07:33:49.560221 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-zm7qx_60f5ae10-2f86-46f8-b613-f017b8753690/marketplace-operator/2.log" Dec 13 07:33:49 crc kubenswrapper[5048]: I1213 07:33:49.760066 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bgnnj_0cdd6ca2-d077-4ea1-8dda-6fadccca087d/extract-utilities/0.log" Dec 13 07:33:49 crc kubenswrapper[5048]: I1213 07:33:49.814073 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bgnnj_0cdd6ca2-d077-4ea1-8dda-6fadccca087d/extract-content/0.log" Dec 13 07:33:49 crc kubenswrapper[5048]: I1213 07:33:49.814367 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bgnnj_0cdd6ca2-d077-4ea1-8dda-6fadccca087d/extract-content/0.log" Dec 13 07:33:49 crc kubenswrapper[5048]: I1213 07:33:49.864249 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-ngg4l_2a00d4f4-f561-491b-a236-7e46f411f58e/registry-server/0.log" Dec 13 07:33:50 crc kubenswrapper[5048]: I1213 07:33:50.036647 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bgnnj_0cdd6ca2-d077-4ea1-8dda-6fadccca087d/extract-utilities/0.log" Dec 13 07:33:50 crc kubenswrapper[5048]: I1213 07:33:50.052920 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bgnnj_0cdd6ca2-d077-4ea1-8dda-6fadccca087d/extract-content/0.log" Dec 13 07:33:50 crc kubenswrapper[5048]: I1213 07:33:50.208327 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-dqz4m_152d7097-80ce-42d3-b0a8-45e04a295b3d/extract-utilities/0.log" Dec 13 07:33:50 crc kubenswrapper[5048]: I1213 07:33:50.223377 5048 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bgnnj_0cdd6ca2-d077-4ea1-8dda-6fadccca087d/registry-server/0.log" Dec 13 07:33:50 crc kubenswrapper[5048]: I1213 07:33:50.303699 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-dqz4m_152d7097-80ce-42d3-b0a8-45e04a295b3d/extract-content/0.log" Dec 13 07:33:50 crc kubenswrapper[5048]: I1213 07:33:50.310046 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-dqz4m_152d7097-80ce-42d3-b0a8-45e04a295b3d/extract-content/0.log" Dec 13 07:33:50 crc kubenswrapper[5048]: I1213 07:33:50.340069 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-dqz4m_152d7097-80ce-42d3-b0a8-45e04a295b3d/extract-utilities/0.log" Dec 13 07:33:50 crc kubenswrapper[5048]: I1213 07:33:50.482768 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-dqz4m_152d7097-80ce-42d3-b0a8-45e04a295b3d/extract-content/0.log" Dec 13 07:33:50 crc kubenswrapper[5048]: I1213 07:33:50.542213 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-dqz4m_152d7097-80ce-42d3-b0a8-45e04a295b3d/extract-utilities/0.log" Dec 13 07:33:50 crc kubenswrapper[5048]: I1213 07:33:50.924094 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-dqz4m_152d7097-80ce-42d3-b0a8-45e04a295b3d/registry-server/0.log" Dec 13 07:33:59 crc kubenswrapper[5048]: I1213 07:33:59.567117 5048 scope.go:117] "RemoveContainer" containerID="816d7f6445997a7cd4a07b0fe58ad5d757b1c38f7ec8f2c73ef48fbeaa2dc8c7" Dec 13 07:33:59 crc kubenswrapper[5048]: E1213 07:33:59.567984 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:34:14 crc kubenswrapper[5048]: I1213 07:34:14.566655 5048 scope.go:117] "RemoveContainer" containerID="816d7f6445997a7cd4a07b0fe58ad5d757b1c38f7ec8f2c73ef48fbeaa2dc8c7" Dec 13 07:34:14 crc kubenswrapper[5048]: E1213 07:34:14.567480 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:34:26 crc kubenswrapper[5048]: I1213 07:34:26.582551 5048 scope.go:117] "RemoveContainer" containerID="816d7f6445997a7cd4a07b0fe58ad5d757b1c38f7ec8f2c73ef48fbeaa2dc8c7" Dec 13 07:34:26 crc kubenswrapper[5048]: E1213 07:34:26.583236 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" 
podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:34:39 crc kubenswrapper[5048]: I1213 07:34:39.568005 5048 scope.go:117] "RemoveContainer" containerID="816d7f6445997a7cd4a07b0fe58ad5d757b1c38f7ec8f2c73ef48fbeaa2dc8c7" Dec 13 07:34:39 crc kubenswrapper[5048]: E1213 07:34:39.569269 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:34:54 crc kubenswrapper[5048]: I1213 07:34:54.568170 5048 scope.go:117] "RemoveContainer" containerID="816d7f6445997a7cd4a07b0fe58ad5d757b1c38f7ec8f2c73ef48fbeaa2dc8c7" Dec 13 07:34:54 crc kubenswrapper[5048]: E1213 07:34:54.569382 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:35:09 crc kubenswrapper[5048]: I1213 07:35:09.567227 5048 scope.go:117] "RemoveContainer" containerID="816d7f6445997a7cd4a07b0fe58ad5d757b1c38f7ec8f2c73ef48fbeaa2dc8c7" Dec 13 07:35:09 crc kubenswrapper[5048]: E1213 07:35:09.569106 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:35:23 crc kubenswrapper[5048]: I1213 07:35:23.568680 5048 scope.go:117] "RemoveContainer" containerID="816d7f6445997a7cd4a07b0fe58ad5d757b1c38f7ec8f2c73ef48fbeaa2dc8c7" Dec 13 07:35:23 crc kubenswrapper[5048]: E1213 07:35:23.569707 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:35:29 crc kubenswrapper[5048]: I1213 07:35:29.931276 5048 generic.go:334] "Generic (PLEG): container finished" podID="59caec1f-b50d-4c55-9b3f-9e646f0642ec" containerID="86e373bd76a759258716285640078fca42454a17b2ba94681932c0e5e163b381" exitCode=0 Dec 13 07:35:29 crc kubenswrapper[5048]: I1213 07:35:29.931357 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-n9sbw/must-gather-whcvp" event={"ID":"59caec1f-b50d-4c55-9b3f-9e646f0642ec","Type":"ContainerDied","Data":"86e373bd76a759258716285640078fca42454a17b2ba94681932c0e5e163b381"} Dec 13 07:35:29 crc kubenswrapper[5048]: I1213 07:35:29.932672 5048 scope.go:117] "RemoveContainer" containerID="86e373bd76a759258716285640078fca42454a17b2ba94681932c0e5e163b381" Dec 13 07:35:30 crc kubenswrapper[5048]: I1213 07:35:30.498210 
5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-n9sbw_must-gather-whcvp_59caec1f-b50d-4c55-9b3f-9e646f0642ec/gather/0.log" Dec 13 07:35:38 crc kubenswrapper[5048]: I1213 07:35:38.567815 5048 scope.go:117] "RemoveContainer" containerID="816d7f6445997a7cd4a07b0fe58ad5d757b1c38f7ec8f2c73ef48fbeaa2dc8c7" Dec 13 07:35:38 crc kubenswrapper[5048]: E1213 07:35:38.568854 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:35:38 crc kubenswrapper[5048]: I1213 07:35:38.918540 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-n9sbw/must-gather-whcvp"] Dec 13 07:35:38 crc kubenswrapper[5048]: I1213 07:35:38.918827 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-n9sbw/must-gather-whcvp" podUID="59caec1f-b50d-4c55-9b3f-9e646f0642ec" containerName="copy" containerID="cri-o://a76f581ee0ca568b530aea863442d1d4275aa5b57a9ad92dacf5d009cebaf42d" gracePeriod=2 Dec 13 07:35:38 crc kubenswrapper[5048]: I1213 07:35:38.929589 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-n9sbw/must-gather-whcvp"] Dec 13 07:35:39 crc kubenswrapper[5048]: I1213 07:35:39.441492 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-n9sbw_must-gather-whcvp_59caec1f-b50d-4c55-9b3f-9e646f0642ec/copy/0.log" Dec 13 07:35:39 crc kubenswrapper[5048]: I1213 07:35:39.442523 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-n9sbw/must-gather-whcvp" Dec 13 07:35:39 crc kubenswrapper[5048]: I1213 07:35:39.573512 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/59caec1f-b50d-4c55-9b3f-9e646f0642ec-must-gather-output\") pod \"59caec1f-b50d-4c55-9b3f-9e646f0642ec\" (UID: \"59caec1f-b50d-4c55-9b3f-9e646f0642ec\") " Dec 13 07:35:39 crc kubenswrapper[5048]: I1213 07:35:39.573836 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-95tnp\" (UniqueName: \"kubernetes.io/projected/59caec1f-b50d-4c55-9b3f-9e646f0642ec-kube-api-access-95tnp\") pod \"59caec1f-b50d-4c55-9b3f-9e646f0642ec\" (UID: \"59caec1f-b50d-4c55-9b3f-9e646f0642ec\") " Dec 13 07:35:39 crc kubenswrapper[5048]: I1213 07:35:39.579856 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59caec1f-b50d-4c55-9b3f-9e646f0642ec-kube-api-access-95tnp" (OuterVolumeSpecName: "kube-api-access-95tnp") pod "59caec1f-b50d-4c55-9b3f-9e646f0642ec" (UID: "59caec1f-b50d-4c55-9b3f-9e646f0642ec"). InnerVolumeSpecName "kube-api-access-95tnp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 07:35:39 crc kubenswrapper[5048]: I1213 07:35:39.676327 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-95tnp\" (UniqueName: \"kubernetes.io/projected/59caec1f-b50d-4c55-9b3f-9e646f0642ec-kube-api-access-95tnp\") on node \"crc\" DevicePath \"\"" Dec 13 07:35:39 crc kubenswrapper[5048]: I1213 07:35:39.723881 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/59caec1f-b50d-4c55-9b3f-9e646f0642ec-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "59caec1f-b50d-4c55-9b3f-9e646f0642ec" (UID: "59caec1f-b50d-4c55-9b3f-9e646f0642ec"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 07:35:39 crc kubenswrapper[5048]: I1213 07:35:39.778607 5048 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/59caec1f-b50d-4c55-9b3f-9e646f0642ec-must-gather-output\") on node \"crc\" DevicePath \"\"" Dec 13 07:35:40 crc kubenswrapper[5048]: I1213 07:35:40.030878 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-n9sbw_must-gather-whcvp_59caec1f-b50d-4c55-9b3f-9e646f0642ec/copy/0.log" Dec 13 07:35:40 crc kubenswrapper[5048]: I1213 07:35:40.031647 5048 generic.go:334] "Generic (PLEG): container finished" podID="59caec1f-b50d-4c55-9b3f-9e646f0642ec" containerID="a76f581ee0ca568b530aea863442d1d4275aa5b57a9ad92dacf5d009cebaf42d" exitCode=143 Dec 13 07:35:40 crc kubenswrapper[5048]: I1213 07:35:40.031756 5048 scope.go:117] "RemoveContainer" containerID="a76f581ee0ca568b530aea863442d1d4275aa5b57a9ad92dacf5d009cebaf42d" Dec 13 07:35:40 crc kubenswrapper[5048]: I1213 07:35:40.031768 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-n9sbw/must-gather-whcvp" Dec 13 07:35:40 crc kubenswrapper[5048]: I1213 07:35:40.063799 5048 scope.go:117] "RemoveContainer" containerID="86e373bd76a759258716285640078fca42454a17b2ba94681932c0e5e163b381" Dec 13 07:35:40 crc kubenswrapper[5048]: I1213 07:35:40.168342 5048 scope.go:117] "RemoveContainer" containerID="a76f581ee0ca568b530aea863442d1d4275aa5b57a9ad92dacf5d009cebaf42d" Dec 13 07:35:40 crc kubenswrapper[5048]: E1213 07:35:40.169333 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a76f581ee0ca568b530aea863442d1d4275aa5b57a9ad92dacf5d009cebaf42d\": container with ID starting with a76f581ee0ca568b530aea863442d1d4275aa5b57a9ad92dacf5d009cebaf42d not found: ID does not exist" containerID="a76f581ee0ca568b530aea863442d1d4275aa5b57a9ad92dacf5d009cebaf42d" Dec 13 07:35:40 crc kubenswrapper[5048]: I1213 07:35:40.169375 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a76f581ee0ca568b530aea863442d1d4275aa5b57a9ad92dacf5d009cebaf42d"} err="failed to get container status \"a76f581ee0ca568b530aea863442d1d4275aa5b57a9ad92dacf5d009cebaf42d\": rpc error: code = NotFound desc = could not find container \"a76f581ee0ca568b530aea863442d1d4275aa5b57a9ad92dacf5d009cebaf42d\": container with ID starting with a76f581ee0ca568b530aea863442d1d4275aa5b57a9ad92dacf5d009cebaf42d not found: ID does not exist" Dec 13 07:35:40 crc kubenswrapper[5048]: I1213 07:35:40.169403 5048 scope.go:117] "RemoveContainer" containerID="86e373bd76a759258716285640078fca42454a17b2ba94681932c0e5e163b381" Dec 13 07:35:40 crc kubenswrapper[5048]: E1213 07:35:40.169954 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"86e373bd76a759258716285640078fca42454a17b2ba94681932c0e5e163b381\": container with ID starting with 86e373bd76a759258716285640078fca42454a17b2ba94681932c0e5e163b381 not found: ID does not exist" containerID="86e373bd76a759258716285640078fca42454a17b2ba94681932c0e5e163b381" Dec 13 07:35:40 crc kubenswrapper[5048]: I1213 07:35:40.169996 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86e373bd76a759258716285640078fca42454a17b2ba94681932c0e5e163b381"} err="failed to get container status \"86e373bd76a759258716285640078fca42454a17b2ba94681932c0e5e163b381\": rpc error: code = NotFound desc = could not find container \"86e373bd76a759258716285640078fca42454a17b2ba94681932c0e5e163b381\": container with ID starting with 86e373bd76a759258716285640078fca42454a17b2ba94681932c0e5e163b381 not found: ID does not exist" Dec 13 07:35:40 crc kubenswrapper[5048]: I1213 07:35:40.579425 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="59caec1f-b50d-4c55-9b3f-9e646f0642ec" path="/var/lib/kubelet/pods/59caec1f-b50d-4c55-9b3f-9e646f0642ec/volumes" Dec 13 07:35:49 crc kubenswrapper[5048]: I1213 07:35:49.567097 5048 scope.go:117] "RemoveContainer" containerID="816d7f6445997a7cd4a07b0fe58ad5d757b1c38f7ec8f2c73ef48fbeaa2dc8c7" Dec 13 07:35:49 crc kubenswrapper[5048]: E1213 07:35:49.567904 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:36:03 crc kubenswrapper[5048]: I1213 07:36:03.567344 5048 scope.go:117] "RemoveContainer" containerID="816d7f6445997a7cd4a07b0fe58ad5d757b1c38f7ec8f2c73ef48fbeaa2dc8c7" Dec 13 07:36:03 crc kubenswrapper[5048]: E1213 07:36:03.568183 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:36:10 crc kubenswrapper[5048]: I1213 07:36:10.959067 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-9l4zr"] Dec 13 07:36:10 crc kubenswrapper[5048]: E1213 07:36:10.960274 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59caec1f-b50d-4c55-9b3f-9e646f0642ec" containerName="gather" Dec 13 07:36:10 crc kubenswrapper[5048]: I1213 07:36:10.960296 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="59caec1f-b50d-4c55-9b3f-9e646f0642ec" containerName="gather" Dec 13 07:36:10 crc kubenswrapper[5048]: E1213 07:36:10.960318 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4a725d5-69c3-410e-8add-13fd97239e2b" containerName="registry-server" Dec 13 07:36:10 crc kubenswrapper[5048]: I1213 07:36:10.960330 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4a725d5-69c3-410e-8add-13fd97239e2b" containerName="registry-server" Dec 13 07:36:10 crc kubenswrapper[5048]: E1213 07:36:10.960370 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4a725d5-69c3-410e-8add-13fd97239e2b" containerName="extract-content" Dec 13 07:36:10 crc kubenswrapper[5048]: I1213 07:36:10.960382 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4a725d5-69c3-410e-8add-13fd97239e2b" containerName="extract-content" Dec 13 07:36:10 crc kubenswrapper[5048]: E1213 07:36:10.960405 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59caec1f-b50d-4c55-9b3f-9e646f0642ec" containerName="copy" Dec 13 07:36:10 crc kubenswrapper[5048]: I1213 07:36:10.960415 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="59caec1f-b50d-4c55-9b3f-9e646f0642ec" containerName="copy" Dec 13 07:36:10 crc kubenswrapper[5048]: E1213 07:36:10.960455 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4a725d5-69c3-410e-8add-13fd97239e2b" containerName="extract-utilities" Dec 13 07:36:10 crc kubenswrapper[5048]: I1213 07:36:10.960468 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4a725d5-69c3-410e-8add-13fd97239e2b" containerName="extract-utilities" Dec 13 07:36:10 crc kubenswrapper[5048]: I1213 07:36:10.960713 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="59caec1f-b50d-4c55-9b3f-9e646f0642ec" containerName="gather" Dec 13 07:36:10 crc kubenswrapper[5048]: I1213 07:36:10.960742 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4a725d5-69c3-410e-8add-13fd97239e2b" containerName="registry-server" Dec 13 07:36:10 crc kubenswrapper[5048]: I1213 07:36:10.960773 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="59caec1f-b50d-4c55-9b3f-9e646f0642ec" containerName="copy" Dec 13 07:36:10 crc kubenswrapper[5048]: I1213 
07:36:10.962731 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9l4zr" Dec 13 07:36:10 crc kubenswrapper[5048]: I1213 07:36:10.975869 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9l4zr"] Dec 13 07:36:11 crc kubenswrapper[5048]: I1213 07:36:11.020312 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qzsj4\" (UniqueName: \"kubernetes.io/projected/405bce54-d1ec-43dc-99f7-9181bb7a0a37-kube-api-access-qzsj4\") pod \"community-operators-9l4zr\" (UID: \"405bce54-d1ec-43dc-99f7-9181bb7a0a37\") " pod="openshift-marketplace/community-operators-9l4zr" Dec 13 07:36:11 crc kubenswrapper[5048]: I1213 07:36:11.020372 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/405bce54-d1ec-43dc-99f7-9181bb7a0a37-catalog-content\") pod \"community-operators-9l4zr\" (UID: \"405bce54-d1ec-43dc-99f7-9181bb7a0a37\") " pod="openshift-marketplace/community-operators-9l4zr" Dec 13 07:36:11 crc kubenswrapper[5048]: I1213 07:36:11.020540 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/405bce54-d1ec-43dc-99f7-9181bb7a0a37-utilities\") pod \"community-operators-9l4zr\" (UID: \"405bce54-d1ec-43dc-99f7-9181bb7a0a37\") " pod="openshift-marketplace/community-operators-9l4zr" Dec 13 07:36:11 crc kubenswrapper[5048]: I1213 07:36:11.122714 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qzsj4\" (UniqueName: \"kubernetes.io/projected/405bce54-d1ec-43dc-99f7-9181bb7a0a37-kube-api-access-qzsj4\") pod \"community-operators-9l4zr\" (UID: \"405bce54-d1ec-43dc-99f7-9181bb7a0a37\") " pod="openshift-marketplace/community-operators-9l4zr" Dec 13 07:36:11 crc kubenswrapper[5048]: I1213 07:36:11.122778 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/405bce54-d1ec-43dc-99f7-9181bb7a0a37-catalog-content\") pod \"community-operators-9l4zr\" (UID: \"405bce54-d1ec-43dc-99f7-9181bb7a0a37\") " pod="openshift-marketplace/community-operators-9l4zr" Dec 13 07:36:11 crc kubenswrapper[5048]: I1213 07:36:11.122899 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/405bce54-d1ec-43dc-99f7-9181bb7a0a37-utilities\") pod \"community-operators-9l4zr\" (UID: \"405bce54-d1ec-43dc-99f7-9181bb7a0a37\") " pod="openshift-marketplace/community-operators-9l4zr" Dec 13 07:36:11 crc kubenswrapper[5048]: I1213 07:36:11.123498 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/405bce54-d1ec-43dc-99f7-9181bb7a0a37-utilities\") pod \"community-operators-9l4zr\" (UID: \"405bce54-d1ec-43dc-99f7-9181bb7a0a37\") " pod="openshift-marketplace/community-operators-9l4zr" Dec 13 07:36:11 crc kubenswrapper[5048]: I1213 07:36:11.123563 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/405bce54-d1ec-43dc-99f7-9181bb7a0a37-catalog-content\") pod \"community-operators-9l4zr\" (UID: \"405bce54-d1ec-43dc-99f7-9181bb7a0a37\") " pod="openshift-marketplace/community-operators-9l4zr" Dec 13 07:36:11 crc 
kubenswrapper[5048]: I1213 07:36:11.148859 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qzsj4\" (UniqueName: \"kubernetes.io/projected/405bce54-d1ec-43dc-99f7-9181bb7a0a37-kube-api-access-qzsj4\") pod \"community-operators-9l4zr\" (UID: \"405bce54-d1ec-43dc-99f7-9181bb7a0a37\") " pod="openshift-marketplace/community-operators-9l4zr" Dec 13 07:36:11 crc kubenswrapper[5048]: I1213 07:36:11.287476 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9l4zr" Dec 13 07:36:11 crc kubenswrapper[5048]: I1213 07:36:11.773768 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9l4zr"] Dec 13 07:36:12 crc kubenswrapper[5048]: I1213 07:36:12.350773 5048 generic.go:334] "Generic (PLEG): container finished" podID="405bce54-d1ec-43dc-99f7-9181bb7a0a37" containerID="9936e8bcc44767893555b8c5d8ed07fa1290a4705af52ca1668c2cb900a6b65f" exitCode=0 Dec 13 07:36:12 crc kubenswrapper[5048]: I1213 07:36:12.350816 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9l4zr" event={"ID":"405bce54-d1ec-43dc-99f7-9181bb7a0a37","Type":"ContainerDied","Data":"9936e8bcc44767893555b8c5d8ed07fa1290a4705af52ca1668c2cb900a6b65f"} Dec 13 07:36:12 crc kubenswrapper[5048]: I1213 07:36:12.351077 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9l4zr" event={"ID":"405bce54-d1ec-43dc-99f7-9181bb7a0a37","Type":"ContainerStarted","Data":"59168af273cc1c7da8eb17ad7d4881128412339c42524aec4f18712705f5bb54"} Dec 13 07:36:18 crc kubenswrapper[5048]: I1213 07:36:18.568023 5048 scope.go:117] "RemoveContainer" containerID="816d7f6445997a7cd4a07b0fe58ad5d757b1c38f7ec8f2c73ef48fbeaa2dc8c7" Dec 13 07:36:19 crc kubenswrapper[5048]: I1213 07:36:19.412385 5048 generic.go:334] "Generic (PLEG): container finished" podID="405bce54-d1ec-43dc-99f7-9181bb7a0a37" containerID="033d1a3a61854d392ca7ce72efdf8e676ae7cad2690cfa15b358b789ced563ad" exitCode=0 Dec 13 07:36:19 crc kubenswrapper[5048]: I1213 07:36:19.412459 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9l4zr" event={"ID":"405bce54-d1ec-43dc-99f7-9181bb7a0a37","Type":"ContainerDied","Data":"033d1a3a61854d392ca7ce72efdf8e676ae7cad2690cfa15b358b789ced563ad"} Dec 13 07:36:19 crc kubenswrapper[5048]: I1213 07:36:19.416472 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" event={"ID":"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b","Type":"ContainerStarted","Data":"39970abd8e25b0365871d71368dd87c07faf6d5fd36ea59cdd3743f467d385a2"} Dec 13 07:36:21 crc kubenswrapper[5048]: I1213 07:36:21.444944 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9l4zr" event={"ID":"405bce54-d1ec-43dc-99f7-9181bb7a0a37","Type":"ContainerStarted","Data":"193b2692d132ed9926eda5364dae5daaefe22f57b09a6fdc62b0bb44457722b5"} Dec 13 07:36:21 crc kubenswrapper[5048]: I1213 07:36:21.463986 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-9l4zr" podStartSLOduration=3.470071894 podStartE2EDuration="11.463964747s" podCreationTimestamp="2025-12-13 07:36:10 +0000 UTC" firstStartedPulling="2025-12-13 07:36:12.353946618 +0000 UTC m=+4006.220541199" lastFinishedPulling="2025-12-13 07:36:20.347839471 +0000 UTC m=+4014.214434052" 
observedRunningTime="2025-12-13 07:36:21.46297326 +0000 UTC m=+4015.329567841" watchObservedRunningTime="2025-12-13 07:36:21.463964747 +0000 UTC m=+4015.330559348" Dec 13 07:36:31 crc kubenswrapper[5048]: I1213 07:36:31.288151 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-9l4zr" Dec 13 07:36:31 crc kubenswrapper[5048]: I1213 07:36:31.289548 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-9l4zr" Dec 13 07:36:31 crc kubenswrapper[5048]: I1213 07:36:31.329523 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-9l4zr" Dec 13 07:36:31 crc kubenswrapper[5048]: I1213 07:36:31.858669 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-9l4zr" Dec 13 07:36:32 crc kubenswrapper[5048]: I1213 07:36:32.046750 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9l4zr"] Dec 13 07:36:32 crc kubenswrapper[5048]: I1213 07:36:32.085759 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ngg4l"] Dec 13 07:36:32 crc kubenswrapper[5048]: I1213 07:36:32.087320 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-ngg4l" podUID="2a00d4f4-f561-491b-a236-7e46f411f58e" containerName="registry-server" containerID="cri-o://349eb777fcaa45d9c516a57dd5bc9ad4bb28bf71f0f7958be0a4a27392ed9b89" gracePeriod=2 Dec 13 07:36:32 crc kubenswrapper[5048]: I1213 07:36:32.786222 5048 generic.go:334] "Generic (PLEG): container finished" podID="2a00d4f4-f561-491b-a236-7e46f411f58e" containerID="349eb777fcaa45d9c516a57dd5bc9ad4bb28bf71f0f7958be0a4a27392ed9b89" exitCode=0 Dec 13 07:36:32 crc kubenswrapper[5048]: I1213 07:36:32.786287 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ngg4l" event={"ID":"2a00d4f4-f561-491b-a236-7e46f411f58e","Type":"ContainerDied","Data":"349eb777fcaa45d9c516a57dd5bc9ad4bb28bf71f0f7958be0a4a27392ed9b89"} Dec 13 07:36:33 crc kubenswrapper[5048]: I1213 07:36:33.096827 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-ngg4l" Dec 13 07:36:33 crc kubenswrapper[5048]: I1213 07:36:33.263675 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a00d4f4-f561-491b-a236-7e46f411f58e-catalog-content\") pod \"2a00d4f4-f561-491b-a236-7e46f411f58e\" (UID: \"2a00d4f4-f561-491b-a236-7e46f411f58e\") " Dec 13 07:36:33 crc kubenswrapper[5048]: I1213 07:36:33.263720 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a00d4f4-f561-491b-a236-7e46f411f58e-utilities\") pod \"2a00d4f4-f561-491b-a236-7e46f411f58e\" (UID: \"2a00d4f4-f561-491b-a236-7e46f411f58e\") " Dec 13 07:36:33 crc kubenswrapper[5048]: I1213 07:36:33.263775 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9jcvc\" (UniqueName: \"kubernetes.io/projected/2a00d4f4-f561-491b-a236-7e46f411f58e-kube-api-access-9jcvc\") pod \"2a00d4f4-f561-491b-a236-7e46f411f58e\" (UID: \"2a00d4f4-f561-491b-a236-7e46f411f58e\") " Dec 13 07:36:33 crc kubenswrapper[5048]: I1213 07:36:33.265806 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2a00d4f4-f561-491b-a236-7e46f411f58e-utilities" (OuterVolumeSpecName: "utilities") pod "2a00d4f4-f561-491b-a236-7e46f411f58e" (UID: "2a00d4f4-f561-491b-a236-7e46f411f58e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 07:36:33 crc kubenswrapper[5048]: I1213 07:36:33.270507 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a00d4f4-f561-491b-a236-7e46f411f58e-kube-api-access-9jcvc" (OuterVolumeSpecName: "kube-api-access-9jcvc") pod "2a00d4f4-f561-491b-a236-7e46f411f58e" (UID: "2a00d4f4-f561-491b-a236-7e46f411f58e"). InnerVolumeSpecName "kube-api-access-9jcvc". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 07:36:33 crc kubenswrapper[5048]: I1213 07:36:33.347497 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2a00d4f4-f561-491b-a236-7e46f411f58e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2a00d4f4-f561-491b-a236-7e46f411f58e" (UID: "2a00d4f4-f561-491b-a236-7e46f411f58e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 07:36:33 crc kubenswrapper[5048]: I1213 07:36:33.366292 5048 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a00d4f4-f561-491b-a236-7e46f411f58e-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 13 07:36:33 crc kubenswrapper[5048]: I1213 07:36:33.366381 5048 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a00d4f4-f561-491b-a236-7e46f411f58e-utilities\") on node \"crc\" DevicePath \"\"" Dec 13 07:36:33 crc kubenswrapper[5048]: I1213 07:36:33.366397 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9jcvc\" (UniqueName: \"kubernetes.io/projected/2a00d4f4-f561-491b-a236-7e46f411f58e-kube-api-access-9jcvc\") on node \"crc\" DevicePath \"\"" Dec 13 07:36:33 crc kubenswrapper[5048]: I1213 07:36:33.800132 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-ngg4l" Dec 13 07:36:33 crc kubenswrapper[5048]: I1213 07:36:33.800179 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ngg4l" event={"ID":"2a00d4f4-f561-491b-a236-7e46f411f58e","Type":"ContainerDied","Data":"3f176b5c016e7053f81fe6b7c6064dce617273697f2e70deb3e75617261768df"} Dec 13 07:36:33 crc kubenswrapper[5048]: I1213 07:36:33.800527 5048 scope.go:117] "RemoveContainer" containerID="349eb777fcaa45d9c516a57dd5bc9ad4bb28bf71f0f7958be0a4a27392ed9b89" Dec 13 07:36:33 crc kubenswrapper[5048]: I1213 07:36:33.844824 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ngg4l"] Dec 13 07:36:33 crc kubenswrapper[5048]: I1213 07:36:33.848471 5048 scope.go:117] "RemoveContainer" containerID="7576d7bcc0a89a26d2d386ca0bbbf4387bfd74da35a1d9f22496dd77a671a8af" Dec 13 07:36:33 crc kubenswrapper[5048]: I1213 07:36:33.858646 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-ngg4l"] Dec 13 07:36:33 crc kubenswrapper[5048]: I1213 07:36:33.874125 5048 scope.go:117] "RemoveContainer" containerID="3c70defbccbd1be1ff38854122eb900c054c62ac49307698f6deea3bc622ac66" Dec 13 07:36:34 crc kubenswrapper[5048]: I1213 07:36:34.592108 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a00d4f4-f561-491b-a236-7e46f411f58e" path="/var/lib/kubelet/pods/2a00d4f4-f561-491b-a236-7e46f411f58e/volumes" Dec 13 07:36:55 crc kubenswrapper[5048]: I1213 07:36:55.915078 5048 scope.go:117] "RemoveContainer" containerID="c9aab5d85a946dc914c5d7b490c14fc6565c4ae081abeb8832653f596f21dd2c" Dec 13 07:37:56 crc kubenswrapper[5048]: I1213 07:37:56.017538 5048 scope.go:117] "RemoveContainer" containerID="8083add98a8dba457cea4d79bf51e1d3f0ea72acc8392066fd6ab03d1fbf9b13" Dec 13 07:38:00 crc kubenswrapper[5048]: I1213 07:38:00.673559 5048 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-6c7b8f495-lq789" podUID="9d13568e-517b-46ae-b3bd-dfa6ee7b671a" containerName="proxy-server" probeResult="failure" output="HTTP probe failed with statuscode: 502" Dec 13 07:38:34 crc kubenswrapper[5048]: I1213 07:38:34.241256 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-8djz2/must-gather-llm8s"] Dec 13 07:38:34 crc kubenswrapper[5048]: E1213 07:38:34.243525 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a00d4f4-f561-491b-a236-7e46f411f58e" containerName="extract-content" Dec 13 07:38:34 crc kubenswrapper[5048]: I1213 07:38:34.243642 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a00d4f4-f561-491b-a236-7e46f411f58e" containerName="extract-content" Dec 13 07:38:34 crc kubenswrapper[5048]: E1213 07:38:34.243750 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a00d4f4-f561-491b-a236-7e46f411f58e" containerName="extract-utilities" Dec 13 07:38:34 crc kubenswrapper[5048]: I1213 07:38:34.243851 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a00d4f4-f561-491b-a236-7e46f411f58e" containerName="extract-utilities" Dec 13 07:38:34 crc kubenswrapper[5048]: E1213 07:38:34.243950 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a00d4f4-f561-491b-a236-7e46f411f58e" containerName="registry-server" Dec 13 07:38:34 crc kubenswrapper[5048]: I1213 07:38:34.244024 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a00d4f4-f561-491b-a236-7e46f411f58e" 
containerName="registry-server" Dec 13 07:38:34 crc kubenswrapper[5048]: I1213 07:38:34.244317 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a00d4f4-f561-491b-a236-7e46f411f58e" containerName="registry-server" Dec 13 07:38:34 crc kubenswrapper[5048]: I1213 07:38:34.245660 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-8djz2/must-gather-llm8s" Dec 13 07:38:34 crc kubenswrapper[5048]: I1213 07:38:34.251524 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-8djz2"/"kube-root-ca.crt" Dec 13 07:38:34 crc kubenswrapper[5048]: I1213 07:38:34.251811 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-8djz2"/"default-dockercfg-mbmn6" Dec 13 07:38:34 crc kubenswrapper[5048]: I1213 07:38:34.253576 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-8djz2"/"openshift-service-ca.crt" Dec 13 07:38:34 crc kubenswrapper[5048]: I1213 07:38:34.298648 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-8djz2/must-gather-llm8s"] Dec 13 07:38:34 crc kubenswrapper[5048]: I1213 07:38:34.349820 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/3a336011-c6da-4a8e-a04b-d1981a139f23-must-gather-output\") pod \"must-gather-llm8s\" (UID: \"3a336011-c6da-4a8e-a04b-d1981a139f23\") " pod="openshift-must-gather-8djz2/must-gather-llm8s" Dec 13 07:38:34 crc kubenswrapper[5048]: I1213 07:38:34.349920 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ttjdw\" (UniqueName: \"kubernetes.io/projected/3a336011-c6da-4a8e-a04b-d1981a139f23-kube-api-access-ttjdw\") pod \"must-gather-llm8s\" (UID: \"3a336011-c6da-4a8e-a04b-d1981a139f23\") " pod="openshift-must-gather-8djz2/must-gather-llm8s" Dec 13 07:38:34 crc kubenswrapper[5048]: I1213 07:38:34.456062 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/3a336011-c6da-4a8e-a04b-d1981a139f23-must-gather-output\") pod \"must-gather-llm8s\" (UID: \"3a336011-c6da-4a8e-a04b-d1981a139f23\") " pod="openshift-must-gather-8djz2/must-gather-llm8s" Dec 13 07:38:34 crc kubenswrapper[5048]: I1213 07:38:34.456153 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ttjdw\" (UniqueName: \"kubernetes.io/projected/3a336011-c6da-4a8e-a04b-d1981a139f23-kube-api-access-ttjdw\") pod \"must-gather-llm8s\" (UID: \"3a336011-c6da-4a8e-a04b-d1981a139f23\") " pod="openshift-must-gather-8djz2/must-gather-llm8s" Dec 13 07:38:34 crc kubenswrapper[5048]: I1213 07:38:34.456692 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/3a336011-c6da-4a8e-a04b-d1981a139f23-must-gather-output\") pod \"must-gather-llm8s\" (UID: \"3a336011-c6da-4a8e-a04b-d1981a139f23\") " pod="openshift-must-gather-8djz2/must-gather-llm8s" Dec 13 07:38:34 crc kubenswrapper[5048]: I1213 07:38:34.480688 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ttjdw\" (UniqueName: \"kubernetes.io/projected/3a336011-c6da-4a8e-a04b-d1981a139f23-kube-api-access-ttjdw\") pod \"must-gather-llm8s\" (UID: \"3a336011-c6da-4a8e-a04b-d1981a139f23\") " pod="openshift-must-gather-8djz2/must-gather-llm8s" 
Dec 13 07:38:34 crc kubenswrapper[5048]: I1213 07:38:34.575318 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-8djz2/must-gather-llm8s" Dec 13 07:38:35 crc kubenswrapper[5048]: I1213 07:38:35.054713 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-8djz2/must-gather-llm8s"] Dec 13 07:38:36 crc kubenswrapper[5048]: I1213 07:38:36.071050 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-8djz2/must-gather-llm8s" event={"ID":"3a336011-c6da-4a8e-a04b-d1981a139f23","Type":"ContainerStarted","Data":"632808db1d8bfe0077a442e6b55424f63358fe38306cf75539e3982ee918fdef"} Dec 13 07:38:36 crc kubenswrapper[5048]: I1213 07:38:36.071681 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-8djz2/must-gather-llm8s" event={"ID":"3a336011-c6da-4a8e-a04b-d1981a139f23","Type":"ContainerStarted","Data":"75ba6958a187d82f97c76197f34c82c62372684a1345291f0a5b87f869f7d96f"} Dec 13 07:38:36 crc kubenswrapper[5048]: I1213 07:38:36.071699 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-8djz2/must-gather-llm8s" event={"ID":"3a336011-c6da-4a8e-a04b-d1981a139f23","Type":"ContainerStarted","Data":"ba0abb5d50f88b27389f6c70308aa4f4f96efb00f8330989091f7673c04ddf9b"} Dec 13 07:38:36 crc kubenswrapper[5048]: I1213 07:38:36.091244 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-8djz2/must-gather-llm8s" podStartSLOduration=2.091217404 podStartE2EDuration="2.091217404s" podCreationTimestamp="2025-12-13 07:38:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 07:38:36.083926305 +0000 UTC m=+4149.950520976" watchObservedRunningTime="2025-12-13 07:38:36.091217404 +0000 UTC m=+4149.957811985" Dec 13 07:38:38 crc kubenswrapper[5048]: E1213 07:38:38.900981 5048 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.251:42548->38.102.83.251:43335: write tcp 38.102.83.251:42548->38.102.83.251:43335: write: broken pipe Dec 13 07:38:39 crc kubenswrapper[5048]: I1213 07:38:39.685037 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-8djz2/crc-debug-8cgh4"] Dec 13 07:38:39 crc kubenswrapper[5048]: I1213 07:38:39.686814 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-8djz2/crc-debug-8cgh4" Dec 13 07:38:39 crc kubenswrapper[5048]: I1213 07:38:39.816160 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e7b505d1-1b5f-41dc-9c02-81915edac7e4-host\") pod \"crc-debug-8cgh4\" (UID: \"e7b505d1-1b5f-41dc-9c02-81915edac7e4\") " pod="openshift-must-gather-8djz2/crc-debug-8cgh4" Dec 13 07:38:39 crc kubenswrapper[5048]: I1213 07:38:39.816233 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zmtqf\" (UniqueName: \"kubernetes.io/projected/e7b505d1-1b5f-41dc-9c02-81915edac7e4-kube-api-access-zmtqf\") pod \"crc-debug-8cgh4\" (UID: \"e7b505d1-1b5f-41dc-9c02-81915edac7e4\") " pod="openshift-must-gather-8djz2/crc-debug-8cgh4" Dec 13 07:38:39 crc kubenswrapper[5048]: I1213 07:38:39.918133 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e7b505d1-1b5f-41dc-9c02-81915edac7e4-host\") pod \"crc-debug-8cgh4\" (UID: \"e7b505d1-1b5f-41dc-9c02-81915edac7e4\") " pod="openshift-must-gather-8djz2/crc-debug-8cgh4" Dec 13 07:38:39 crc kubenswrapper[5048]: I1213 07:38:39.918298 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e7b505d1-1b5f-41dc-9c02-81915edac7e4-host\") pod \"crc-debug-8cgh4\" (UID: \"e7b505d1-1b5f-41dc-9c02-81915edac7e4\") " pod="openshift-must-gather-8djz2/crc-debug-8cgh4" Dec 13 07:38:39 crc kubenswrapper[5048]: I1213 07:38:39.918323 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zmtqf\" (UniqueName: \"kubernetes.io/projected/e7b505d1-1b5f-41dc-9c02-81915edac7e4-kube-api-access-zmtqf\") pod \"crc-debug-8cgh4\" (UID: \"e7b505d1-1b5f-41dc-9c02-81915edac7e4\") " pod="openshift-must-gather-8djz2/crc-debug-8cgh4" Dec 13 07:38:39 crc kubenswrapper[5048]: I1213 07:38:39.941580 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zmtqf\" (UniqueName: \"kubernetes.io/projected/e7b505d1-1b5f-41dc-9c02-81915edac7e4-kube-api-access-zmtqf\") pod \"crc-debug-8cgh4\" (UID: \"e7b505d1-1b5f-41dc-9c02-81915edac7e4\") " pod="openshift-must-gather-8djz2/crc-debug-8cgh4" Dec 13 07:38:40 crc kubenswrapper[5048]: I1213 07:38:40.007129 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-8djz2/crc-debug-8cgh4" Dec 13 07:38:40 crc kubenswrapper[5048]: W1213 07:38:40.036326 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode7b505d1_1b5f_41dc_9c02_81915edac7e4.slice/crio-5a19a3cdb6f04492d7acc0a7f741290fc40ad2749012ab995702095938a5b365 WatchSource:0}: Error finding container 5a19a3cdb6f04492d7acc0a7f741290fc40ad2749012ab995702095938a5b365: Status 404 returned error can't find the container with id 5a19a3cdb6f04492d7acc0a7f741290fc40ad2749012ab995702095938a5b365 Dec 13 07:38:40 crc kubenswrapper[5048]: I1213 07:38:40.102473 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-8djz2/crc-debug-8cgh4" event={"ID":"e7b505d1-1b5f-41dc-9c02-81915edac7e4","Type":"ContainerStarted","Data":"5a19a3cdb6f04492d7acc0a7f741290fc40ad2749012ab995702095938a5b365"} Dec 13 07:38:41 crc kubenswrapper[5048]: I1213 07:38:41.117083 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-8djz2/crc-debug-8cgh4" event={"ID":"e7b505d1-1b5f-41dc-9c02-81915edac7e4","Type":"ContainerStarted","Data":"69f79b0584fcc03324fef0a07d9d0b22e76e9748462197d49829d87d571deb4c"} Dec 13 07:38:41 crc kubenswrapper[5048]: I1213 07:38:41.132257 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-8djz2/crc-debug-8cgh4" podStartSLOduration=2.132240291 podStartE2EDuration="2.132240291s" podCreationTimestamp="2025-12-13 07:38:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 07:38:41.127378839 +0000 UTC m=+4154.993973420" watchObservedRunningTime="2025-12-13 07:38:41.132240291 +0000 UTC m=+4154.998834872" Dec 13 07:38:46 crc kubenswrapper[5048]: I1213 07:38:46.215912 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 13 07:38:46 crc kubenswrapper[5048]: I1213 07:38:46.217699 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 13 07:38:48 crc kubenswrapper[5048]: I1213 07:38:48.121629 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-l6ndc"] Dec 13 07:38:48 crc kubenswrapper[5048]: I1213 07:38:48.125133 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l6ndc" Dec 13 07:38:48 crc kubenswrapper[5048]: I1213 07:38:48.143619 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-l6ndc"] Dec 13 07:38:48 crc kubenswrapper[5048]: I1213 07:38:48.272390 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf-catalog-content\") pod \"redhat-marketplace-l6ndc\" (UID: \"3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf\") " pod="openshift-marketplace/redhat-marketplace-l6ndc" Dec 13 07:38:48 crc kubenswrapper[5048]: I1213 07:38:48.272608 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dgdlp\" (UniqueName: \"kubernetes.io/projected/3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf-kube-api-access-dgdlp\") pod \"redhat-marketplace-l6ndc\" (UID: \"3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf\") " pod="openshift-marketplace/redhat-marketplace-l6ndc" Dec 13 07:38:48 crc kubenswrapper[5048]: I1213 07:38:48.272711 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf-utilities\") pod \"redhat-marketplace-l6ndc\" (UID: \"3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf\") " pod="openshift-marketplace/redhat-marketplace-l6ndc" Dec 13 07:38:48 crc kubenswrapper[5048]: I1213 07:38:48.374474 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf-utilities\") pod \"redhat-marketplace-l6ndc\" (UID: \"3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf\") " pod="openshift-marketplace/redhat-marketplace-l6ndc" Dec 13 07:38:48 crc kubenswrapper[5048]: I1213 07:38:48.374881 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf-catalog-content\") pod \"redhat-marketplace-l6ndc\" (UID: \"3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf\") " pod="openshift-marketplace/redhat-marketplace-l6ndc" Dec 13 07:38:48 crc kubenswrapper[5048]: I1213 07:38:48.374967 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dgdlp\" (UniqueName: \"kubernetes.io/projected/3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf-kube-api-access-dgdlp\") pod \"redhat-marketplace-l6ndc\" (UID: \"3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf\") " pod="openshift-marketplace/redhat-marketplace-l6ndc" Dec 13 07:38:48 crc kubenswrapper[5048]: I1213 07:38:48.374965 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf-utilities\") pod \"redhat-marketplace-l6ndc\" (UID: \"3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf\") " pod="openshift-marketplace/redhat-marketplace-l6ndc" Dec 13 07:38:48 crc kubenswrapper[5048]: I1213 07:38:48.375345 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf-catalog-content\") pod \"redhat-marketplace-l6ndc\" (UID: \"3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf\") " pod="openshift-marketplace/redhat-marketplace-l6ndc" Dec 13 07:38:48 crc kubenswrapper[5048]: I1213 07:38:48.406802 5048 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-dgdlp\" (UniqueName: \"kubernetes.io/projected/3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf-kube-api-access-dgdlp\") pod \"redhat-marketplace-l6ndc\" (UID: \"3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf\") " pod="openshift-marketplace/redhat-marketplace-l6ndc" Dec 13 07:38:48 crc kubenswrapper[5048]: I1213 07:38:48.451754 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l6ndc" Dec 13 07:38:48 crc kubenswrapper[5048]: I1213 07:38:48.969696 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-l6ndc"] Dec 13 07:38:48 crc kubenswrapper[5048]: W1213 07:38:48.977234 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3a16cf3a_a9bd_4767_99a8_59cc3b78d7cf.slice/crio-999807e4fbed93df7815535053d7d76346ac28643fd69ddfeaaef52c5f73e195 WatchSource:0}: Error finding container 999807e4fbed93df7815535053d7d76346ac28643fd69ddfeaaef52c5f73e195: Status 404 returned error can't find the container with id 999807e4fbed93df7815535053d7d76346ac28643fd69ddfeaaef52c5f73e195 Dec 13 07:38:49 crc kubenswrapper[5048]: I1213 07:38:49.212560 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l6ndc" event={"ID":"3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf","Type":"ContainerStarted","Data":"999807e4fbed93df7815535053d7d76346ac28643fd69ddfeaaef52c5f73e195"} Dec 13 07:38:50 crc kubenswrapper[5048]: I1213 07:38:50.222128 5048 generic.go:334] "Generic (PLEG): container finished" podID="3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf" containerID="7a6356f9aa24623d1f3fe96f5e709ca498d96f3b08ace92c74a4ffef5ac3f67a" exitCode=0 Dec 13 07:38:50 crc kubenswrapper[5048]: I1213 07:38:50.222244 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l6ndc" event={"ID":"3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf","Type":"ContainerDied","Data":"7a6356f9aa24623d1f3fe96f5e709ca498d96f3b08ace92c74a4ffef5ac3f67a"} Dec 13 07:38:50 crc kubenswrapper[5048]: I1213 07:38:50.229236 5048 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 13 07:38:53 crc kubenswrapper[5048]: I1213 07:38:53.254766 5048 generic.go:334] "Generic (PLEG): container finished" podID="3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf" containerID="ca90178e8cc0bcb8c44b2c3ea83ba14ea03b4e556cd25e5495bb40aa53882ed5" exitCode=0 Dec 13 07:38:53 crc kubenswrapper[5048]: I1213 07:38:53.254880 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l6ndc" event={"ID":"3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf","Type":"ContainerDied","Data":"ca90178e8cc0bcb8c44b2c3ea83ba14ea03b4e556cd25e5495bb40aa53882ed5"} Dec 13 07:38:56 crc kubenswrapper[5048]: I1213 07:38:56.291028 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l6ndc" event={"ID":"3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf","Type":"ContainerStarted","Data":"e63013087ae561db6411ad5d8f0b453f84643c7eb1080eef37253ba6337236d0"} Dec 13 07:38:56 crc kubenswrapper[5048]: I1213 07:38:56.327564 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-l6ndc" podStartSLOduration=4.295035992 podStartE2EDuration="8.327543373s" podCreationTimestamp="2025-12-13 07:38:48 +0000 UTC" firstStartedPulling="2025-12-13 07:38:50.228999243 +0000 UTC m=+4164.095593824" 
lastFinishedPulling="2025-12-13 07:38:54.261506624 +0000 UTC m=+4168.128101205" observedRunningTime="2025-12-13 07:38:56.323305647 +0000 UTC m=+4170.189900228" watchObservedRunningTime="2025-12-13 07:38:56.327543373 +0000 UTC m=+4170.194137964" Dec 13 07:38:58 crc kubenswrapper[5048]: I1213 07:38:58.452831 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-l6ndc" Dec 13 07:38:58 crc kubenswrapper[5048]: I1213 07:38:58.453666 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-l6ndc" Dec 13 07:38:58 crc kubenswrapper[5048]: I1213 07:38:58.496458 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-l6ndc" Dec 13 07:39:00 crc kubenswrapper[5048]: I1213 07:39:00.388655 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-l6ndc" Dec 13 07:39:00 crc kubenswrapper[5048]: I1213 07:39:00.447037 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-l6ndc"] Dec 13 07:39:02 crc kubenswrapper[5048]: I1213 07:39:02.339649 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-l6ndc" podUID="3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf" containerName="registry-server" containerID="cri-o://e63013087ae561db6411ad5d8f0b453f84643c7eb1080eef37253ba6337236d0" gracePeriod=2 Dec 13 07:39:02 crc kubenswrapper[5048]: I1213 07:39:02.840498 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l6ndc" Dec 13 07:39:03 crc kubenswrapper[5048]: I1213 07:39:03.003934 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dgdlp\" (UniqueName: \"kubernetes.io/projected/3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf-kube-api-access-dgdlp\") pod \"3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf\" (UID: \"3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf\") " Dec 13 07:39:03 crc kubenswrapper[5048]: I1213 07:39:03.004053 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf-catalog-content\") pod \"3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf\" (UID: \"3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf\") " Dec 13 07:39:03 crc kubenswrapper[5048]: I1213 07:39:03.004112 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf-utilities\") pod \"3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf\" (UID: \"3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf\") " Dec 13 07:39:03 crc kubenswrapper[5048]: I1213 07:39:03.005243 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf-utilities" (OuterVolumeSpecName: "utilities") pod "3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf" (UID: "3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 07:39:03 crc kubenswrapper[5048]: I1213 07:39:03.009648 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf-kube-api-access-dgdlp" (OuterVolumeSpecName: "kube-api-access-dgdlp") pod "3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf" (UID: "3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf"). InnerVolumeSpecName "kube-api-access-dgdlp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 07:39:03 crc kubenswrapper[5048]: I1213 07:39:03.030725 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf" (UID: "3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 07:39:03 crc kubenswrapper[5048]: I1213 07:39:03.106260 5048 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf-utilities\") on node \"crc\" DevicePath \"\"" Dec 13 07:39:03 crc kubenswrapper[5048]: I1213 07:39:03.106293 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dgdlp\" (UniqueName: \"kubernetes.io/projected/3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf-kube-api-access-dgdlp\") on node \"crc\" DevicePath \"\"" Dec 13 07:39:03 crc kubenswrapper[5048]: I1213 07:39:03.106304 5048 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 13 07:39:03 crc kubenswrapper[5048]: I1213 07:39:03.370252 5048 generic.go:334] "Generic (PLEG): container finished" podID="3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf" containerID="e63013087ae561db6411ad5d8f0b453f84643c7eb1080eef37253ba6337236d0" exitCode=0 Dec 13 07:39:03 crc kubenswrapper[5048]: I1213 07:39:03.370322 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l6ndc" event={"ID":"3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf","Type":"ContainerDied","Data":"e63013087ae561db6411ad5d8f0b453f84643c7eb1080eef37253ba6337236d0"} Dec 13 07:39:03 crc kubenswrapper[5048]: I1213 07:39:03.370361 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l6ndc" event={"ID":"3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf","Type":"ContainerDied","Data":"999807e4fbed93df7815535053d7d76346ac28643fd69ddfeaaef52c5f73e195"} Dec 13 07:39:03 crc kubenswrapper[5048]: I1213 07:39:03.370382 5048 scope.go:117] "RemoveContainer" containerID="e63013087ae561db6411ad5d8f0b453f84643c7eb1080eef37253ba6337236d0" Dec 13 07:39:03 crc kubenswrapper[5048]: I1213 07:39:03.371636 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l6ndc" Dec 13 07:39:03 crc kubenswrapper[5048]: I1213 07:39:03.393118 5048 scope.go:117] "RemoveContainer" containerID="ca90178e8cc0bcb8c44b2c3ea83ba14ea03b4e556cd25e5495bb40aa53882ed5" Dec 13 07:39:03 crc kubenswrapper[5048]: I1213 07:39:03.427665 5048 scope.go:117] "RemoveContainer" containerID="7a6356f9aa24623d1f3fe96f5e709ca498d96f3b08ace92c74a4ffef5ac3f67a" Dec 13 07:39:03 crc kubenswrapper[5048]: I1213 07:39:03.435542 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-l6ndc"] Dec 13 07:39:03 crc kubenswrapper[5048]: I1213 07:39:03.448364 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-l6ndc"] Dec 13 07:39:03 crc kubenswrapper[5048]: I1213 07:39:03.480670 5048 scope.go:117] "RemoveContainer" containerID="e63013087ae561db6411ad5d8f0b453f84643c7eb1080eef37253ba6337236d0" Dec 13 07:39:03 crc kubenswrapper[5048]: E1213 07:39:03.481363 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e63013087ae561db6411ad5d8f0b453f84643c7eb1080eef37253ba6337236d0\": container with ID starting with e63013087ae561db6411ad5d8f0b453f84643c7eb1080eef37253ba6337236d0 not found: ID does not exist" containerID="e63013087ae561db6411ad5d8f0b453f84643c7eb1080eef37253ba6337236d0" Dec 13 07:39:03 crc kubenswrapper[5048]: I1213 07:39:03.481401 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e63013087ae561db6411ad5d8f0b453f84643c7eb1080eef37253ba6337236d0"} err="failed to get container status \"e63013087ae561db6411ad5d8f0b453f84643c7eb1080eef37253ba6337236d0\": rpc error: code = NotFound desc = could not find container \"e63013087ae561db6411ad5d8f0b453f84643c7eb1080eef37253ba6337236d0\": container with ID starting with e63013087ae561db6411ad5d8f0b453f84643c7eb1080eef37253ba6337236d0 not found: ID does not exist" Dec 13 07:39:03 crc kubenswrapper[5048]: I1213 07:39:03.481428 5048 scope.go:117] "RemoveContainer" containerID="ca90178e8cc0bcb8c44b2c3ea83ba14ea03b4e556cd25e5495bb40aa53882ed5" Dec 13 07:39:03 crc kubenswrapper[5048]: E1213 07:39:03.481924 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca90178e8cc0bcb8c44b2c3ea83ba14ea03b4e556cd25e5495bb40aa53882ed5\": container with ID starting with ca90178e8cc0bcb8c44b2c3ea83ba14ea03b4e556cd25e5495bb40aa53882ed5 not found: ID does not exist" containerID="ca90178e8cc0bcb8c44b2c3ea83ba14ea03b4e556cd25e5495bb40aa53882ed5" Dec 13 07:39:03 crc kubenswrapper[5048]: I1213 07:39:03.481957 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca90178e8cc0bcb8c44b2c3ea83ba14ea03b4e556cd25e5495bb40aa53882ed5"} err="failed to get container status \"ca90178e8cc0bcb8c44b2c3ea83ba14ea03b4e556cd25e5495bb40aa53882ed5\": rpc error: code = NotFound desc = could not find container \"ca90178e8cc0bcb8c44b2c3ea83ba14ea03b4e556cd25e5495bb40aa53882ed5\": container with ID starting with ca90178e8cc0bcb8c44b2c3ea83ba14ea03b4e556cd25e5495bb40aa53882ed5 not found: ID does not exist" Dec 13 07:39:03 crc kubenswrapper[5048]: I1213 07:39:03.481976 5048 scope.go:117] "RemoveContainer" containerID="7a6356f9aa24623d1f3fe96f5e709ca498d96f3b08ace92c74a4ffef5ac3f67a" Dec 13 07:39:03 crc kubenswrapper[5048]: E1213 07:39:03.482311 5048 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"7a6356f9aa24623d1f3fe96f5e709ca498d96f3b08ace92c74a4ffef5ac3f67a\": container with ID starting with 7a6356f9aa24623d1f3fe96f5e709ca498d96f3b08ace92c74a4ffef5ac3f67a not found: ID does not exist" containerID="7a6356f9aa24623d1f3fe96f5e709ca498d96f3b08ace92c74a4ffef5ac3f67a" Dec 13 07:39:03 crc kubenswrapper[5048]: I1213 07:39:03.482336 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7a6356f9aa24623d1f3fe96f5e709ca498d96f3b08ace92c74a4ffef5ac3f67a"} err="failed to get container status \"7a6356f9aa24623d1f3fe96f5e709ca498d96f3b08ace92c74a4ffef5ac3f67a\": rpc error: code = NotFound desc = could not find container \"7a6356f9aa24623d1f3fe96f5e709ca498d96f3b08ace92c74a4ffef5ac3f67a\": container with ID starting with 7a6356f9aa24623d1f3fe96f5e709ca498d96f3b08ace92c74a4ffef5ac3f67a not found: ID does not exist" Dec 13 07:39:04 crc kubenswrapper[5048]: I1213 07:39:04.576546 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf" path="/var/lib/kubelet/pods/3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf/volumes" Dec 13 07:39:16 crc kubenswrapper[5048]: I1213 07:39:16.216043 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 13 07:39:16 crc kubenswrapper[5048]: I1213 07:39:16.216667 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 13 07:39:19 crc kubenswrapper[5048]: I1213 07:39:19.521784 5048 generic.go:334] "Generic (PLEG): container finished" podID="e7b505d1-1b5f-41dc-9c02-81915edac7e4" containerID="69f79b0584fcc03324fef0a07d9d0b22e76e9748462197d49829d87d571deb4c" exitCode=0 Dec 13 07:39:19 crc kubenswrapper[5048]: I1213 07:39:19.521837 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-8djz2/crc-debug-8cgh4" event={"ID":"e7b505d1-1b5f-41dc-9c02-81915edac7e4","Type":"ContainerDied","Data":"69f79b0584fcc03324fef0a07d9d0b22e76e9748462197d49829d87d571deb4c"} Dec 13 07:39:20 crc kubenswrapper[5048]: I1213 07:39:20.626133 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-8djz2/crc-debug-8cgh4" Dec 13 07:39:20 crc kubenswrapper[5048]: I1213 07:39:20.662738 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-8djz2/crc-debug-8cgh4"] Dec 13 07:39:20 crc kubenswrapper[5048]: I1213 07:39:20.673166 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-8djz2/crc-debug-8cgh4"] Dec 13 07:39:20 crc kubenswrapper[5048]: I1213 07:39:20.695416 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e7b505d1-1b5f-41dc-9c02-81915edac7e4-host\") pod \"e7b505d1-1b5f-41dc-9c02-81915edac7e4\" (UID: \"e7b505d1-1b5f-41dc-9c02-81915edac7e4\") " Dec 13 07:39:20 crc kubenswrapper[5048]: I1213 07:39:20.695791 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zmtqf\" (UniqueName: \"kubernetes.io/projected/e7b505d1-1b5f-41dc-9c02-81915edac7e4-kube-api-access-zmtqf\") pod \"e7b505d1-1b5f-41dc-9c02-81915edac7e4\" (UID: \"e7b505d1-1b5f-41dc-9c02-81915edac7e4\") " Dec 13 07:39:20 crc kubenswrapper[5048]: I1213 07:39:20.695513 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e7b505d1-1b5f-41dc-9c02-81915edac7e4-host" (OuterVolumeSpecName: "host") pod "e7b505d1-1b5f-41dc-9c02-81915edac7e4" (UID: "e7b505d1-1b5f-41dc-9c02-81915edac7e4"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 13 07:39:20 crc kubenswrapper[5048]: I1213 07:39:20.696505 5048 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e7b505d1-1b5f-41dc-9c02-81915edac7e4-host\") on node \"crc\" DevicePath \"\"" Dec 13 07:39:20 crc kubenswrapper[5048]: I1213 07:39:20.702234 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7b505d1-1b5f-41dc-9c02-81915edac7e4-kube-api-access-zmtqf" (OuterVolumeSpecName: "kube-api-access-zmtqf") pod "e7b505d1-1b5f-41dc-9c02-81915edac7e4" (UID: "e7b505d1-1b5f-41dc-9c02-81915edac7e4"). InnerVolumeSpecName "kube-api-access-zmtqf". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 07:39:20 crc kubenswrapper[5048]: I1213 07:39:20.798383 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zmtqf\" (UniqueName: \"kubernetes.io/projected/e7b505d1-1b5f-41dc-9c02-81915edac7e4-kube-api-access-zmtqf\") on node \"crc\" DevicePath \"\"" Dec 13 07:39:21 crc kubenswrapper[5048]: I1213 07:39:21.540197 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5a19a3cdb6f04492d7acc0a7f741290fc40ad2749012ab995702095938a5b365" Dec 13 07:39:21 crc kubenswrapper[5048]: I1213 07:39:21.540823 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-8djz2/crc-debug-8cgh4" Dec 13 07:39:21 crc kubenswrapper[5048]: I1213 07:39:21.846691 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-8djz2/crc-debug-zxr6j"] Dec 13 07:39:21 crc kubenswrapper[5048]: E1213 07:39:21.847348 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf" containerName="extract-utilities" Dec 13 07:39:21 crc kubenswrapper[5048]: I1213 07:39:21.847361 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf" containerName="extract-utilities" Dec 13 07:39:21 crc kubenswrapper[5048]: E1213 07:39:21.847391 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf" containerName="extract-content" Dec 13 07:39:21 crc kubenswrapper[5048]: I1213 07:39:21.847397 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf" containerName="extract-content" Dec 13 07:39:21 crc kubenswrapper[5048]: E1213 07:39:21.847408 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf" containerName="registry-server" Dec 13 07:39:21 crc kubenswrapper[5048]: I1213 07:39:21.847415 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf" containerName="registry-server" Dec 13 07:39:21 crc kubenswrapper[5048]: E1213 07:39:21.847425 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7b505d1-1b5f-41dc-9c02-81915edac7e4" containerName="container-00" Dec 13 07:39:21 crc kubenswrapper[5048]: I1213 07:39:21.847562 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7b505d1-1b5f-41dc-9c02-81915edac7e4" containerName="container-00" Dec 13 07:39:21 crc kubenswrapper[5048]: I1213 07:39:21.847756 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="e7b505d1-1b5f-41dc-9c02-81915edac7e4" containerName="container-00" Dec 13 07:39:21 crc kubenswrapper[5048]: I1213 07:39:21.847770 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a16cf3a-a9bd-4767-99a8-59cc3b78d7cf" containerName="registry-server" Dec 13 07:39:21 crc kubenswrapper[5048]: I1213 07:39:21.848518 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-8djz2/crc-debug-zxr6j" Dec 13 07:39:21 crc kubenswrapper[5048]: I1213 07:39:21.921234 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c3182889-83cf-4ec6-a4b2-387bab6f5dcb-host\") pod \"crc-debug-zxr6j\" (UID: \"c3182889-83cf-4ec6-a4b2-387bab6f5dcb\") " pod="openshift-must-gather-8djz2/crc-debug-zxr6j" Dec 13 07:39:21 crc kubenswrapper[5048]: I1213 07:39:21.921521 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vh2jq\" (UniqueName: \"kubernetes.io/projected/c3182889-83cf-4ec6-a4b2-387bab6f5dcb-kube-api-access-vh2jq\") pod \"crc-debug-zxr6j\" (UID: \"c3182889-83cf-4ec6-a4b2-387bab6f5dcb\") " pod="openshift-must-gather-8djz2/crc-debug-zxr6j" Dec 13 07:39:22 crc kubenswrapper[5048]: I1213 07:39:22.023911 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c3182889-83cf-4ec6-a4b2-387bab6f5dcb-host\") pod \"crc-debug-zxr6j\" (UID: \"c3182889-83cf-4ec6-a4b2-387bab6f5dcb\") " pod="openshift-must-gather-8djz2/crc-debug-zxr6j" Dec 13 07:39:22 crc kubenswrapper[5048]: I1213 07:39:22.024033 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vh2jq\" (UniqueName: \"kubernetes.io/projected/c3182889-83cf-4ec6-a4b2-387bab6f5dcb-kube-api-access-vh2jq\") pod \"crc-debug-zxr6j\" (UID: \"c3182889-83cf-4ec6-a4b2-387bab6f5dcb\") " pod="openshift-must-gather-8djz2/crc-debug-zxr6j" Dec 13 07:39:22 crc kubenswrapper[5048]: I1213 07:39:22.024068 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c3182889-83cf-4ec6-a4b2-387bab6f5dcb-host\") pod \"crc-debug-zxr6j\" (UID: \"c3182889-83cf-4ec6-a4b2-387bab6f5dcb\") " pod="openshift-must-gather-8djz2/crc-debug-zxr6j" Dec 13 07:39:22 crc kubenswrapper[5048]: I1213 07:39:22.049162 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vh2jq\" (UniqueName: \"kubernetes.io/projected/c3182889-83cf-4ec6-a4b2-387bab6f5dcb-kube-api-access-vh2jq\") pod \"crc-debug-zxr6j\" (UID: \"c3182889-83cf-4ec6-a4b2-387bab6f5dcb\") " pod="openshift-must-gather-8djz2/crc-debug-zxr6j" Dec 13 07:39:22 crc kubenswrapper[5048]: I1213 07:39:22.165735 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-8djz2/crc-debug-zxr6j" Dec 13 07:39:22 crc kubenswrapper[5048]: I1213 07:39:22.554984 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-8djz2/crc-debug-zxr6j" event={"ID":"c3182889-83cf-4ec6-a4b2-387bab6f5dcb","Type":"ContainerStarted","Data":"d8db1fb3cd920f17e00c0c58a07321f3e2dd6e5ecf64aeff7e9a5c68d691c2fb"} Dec 13 07:39:22 crc kubenswrapper[5048]: I1213 07:39:22.580012 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7b505d1-1b5f-41dc-9c02-81915edac7e4" path="/var/lib/kubelet/pods/e7b505d1-1b5f-41dc-9c02-81915edac7e4/volumes" Dec 13 07:39:23 crc kubenswrapper[5048]: I1213 07:39:23.564477 5048 generic.go:334] "Generic (PLEG): container finished" podID="c3182889-83cf-4ec6-a4b2-387bab6f5dcb" containerID="092c56d82f2b7003358dc49e20ffa6777ab6134c4f98ef1008b6f60adf31403f" exitCode=0 Dec 13 07:39:23 crc kubenswrapper[5048]: I1213 07:39:23.564518 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-8djz2/crc-debug-zxr6j" event={"ID":"c3182889-83cf-4ec6-a4b2-387bab6f5dcb","Type":"ContainerDied","Data":"092c56d82f2b7003358dc49e20ffa6777ab6134c4f98ef1008b6f60adf31403f"} Dec 13 07:39:24 crc kubenswrapper[5048]: I1213 07:39:24.086779 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-8djz2/crc-debug-zxr6j"] Dec 13 07:39:24 crc kubenswrapper[5048]: I1213 07:39:24.100778 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-8djz2/crc-debug-zxr6j"] Dec 13 07:39:24 crc kubenswrapper[5048]: I1213 07:39:24.708488 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-8djz2/crc-debug-zxr6j" Dec 13 07:39:24 crc kubenswrapper[5048]: I1213 07:39:24.795808 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c3182889-83cf-4ec6-a4b2-387bab6f5dcb-host\") pod \"c3182889-83cf-4ec6-a4b2-387bab6f5dcb\" (UID: \"c3182889-83cf-4ec6-a4b2-387bab6f5dcb\") " Dec 13 07:39:24 crc kubenswrapper[5048]: I1213 07:39:24.795947 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c3182889-83cf-4ec6-a4b2-387bab6f5dcb-host" (OuterVolumeSpecName: "host") pod "c3182889-83cf-4ec6-a4b2-387bab6f5dcb" (UID: "c3182889-83cf-4ec6-a4b2-387bab6f5dcb"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 13 07:39:24 crc kubenswrapper[5048]: I1213 07:39:24.796463 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vh2jq\" (UniqueName: \"kubernetes.io/projected/c3182889-83cf-4ec6-a4b2-387bab6f5dcb-kube-api-access-vh2jq\") pod \"c3182889-83cf-4ec6-a4b2-387bab6f5dcb\" (UID: \"c3182889-83cf-4ec6-a4b2-387bab6f5dcb\") " Dec 13 07:39:24 crc kubenswrapper[5048]: I1213 07:39:24.796944 5048 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c3182889-83cf-4ec6-a4b2-387bab6f5dcb-host\") on node \"crc\" DevicePath \"\"" Dec 13 07:39:24 crc kubenswrapper[5048]: I1213 07:39:24.804968 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c3182889-83cf-4ec6-a4b2-387bab6f5dcb-kube-api-access-vh2jq" (OuterVolumeSpecName: "kube-api-access-vh2jq") pod "c3182889-83cf-4ec6-a4b2-387bab6f5dcb" (UID: "c3182889-83cf-4ec6-a4b2-387bab6f5dcb"). InnerVolumeSpecName "kube-api-access-vh2jq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 07:39:24 crc kubenswrapper[5048]: I1213 07:39:24.898950 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vh2jq\" (UniqueName: \"kubernetes.io/projected/c3182889-83cf-4ec6-a4b2-387bab6f5dcb-kube-api-access-vh2jq\") on node \"crc\" DevicePath \"\"" Dec 13 07:39:25 crc kubenswrapper[5048]: I1213 07:39:25.317974 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-8djz2/crc-debug-tgchk"] Dec 13 07:39:25 crc kubenswrapper[5048]: E1213 07:39:25.318530 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3182889-83cf-4ec6-a4b2-387bab6f5dcb" containerName="container-00" Dec 13 07:39:25 crc kubenswrapper[5048]: I1213 07:39:25.318555 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3182889-83cf-4ec6-a4b2-387bab6f5dcb" containerName="container-00" Dec 13 07:39:25 crc kubenswrapper[5048]: I1213 07:39:25.318767 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3182889-83cf-4ec6-a4b2-387bab6f5dcb" containerName="container-00" Dec 13 07:39:25 crc kubenswrapper[5048]: I1213 07:39:25.319510 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-8djz2/crc-debug-tgchk" Dec 13 07:39:25 crc kubenswrapper[5048]: I1213 07:39:25.407100 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gsdw8\" (UniqueName: \"kubernetes.io/projected/ff826bac-310b-4e92-a5c9-7c0fd6a2fbb6-kube-api-access-gsdw8\") pod \"crc-debug-tgchk\" (UID: \"ff826bac-310b-4e92-a5c9-7c0fd6a2fbb6\") " pod="openshift-must-gather-8djz2/crc-debug-tgchk" Dec 13 07:39:25 crc kubenswrapper[5048]: I1213 07:39:25.407349 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ff826bac-310b-4e92-a5c9-7c0fd6a2fbb6-host\") pod \"crc-debug-tgchk\" (UID: \"ff826bac-310b-4e92-a5c9-7c0fd6a2fbb6\") " pod="openshift-must-gather-8djz2/crc-debug-tgchk" Dec 13 07:39:25 crc kubenswrapper[5048]: I1213 07:39:25.951865 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ff826bac-310b-4e92-a5c9-7c0fd6a2fbb6-host\") pod \"crc-debug-tgchk\" (UID: \"ff826bac-310b-4e92-a5c9-7c0fd6a2fbb6\") " pod="openshift-must-gather-8djz2/crc-debug-tgchk" Dec 13 07:39:25 crc kubenswrapper[5048]: I1213 07:39:25.952250 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gsdw8\" (UniqueName: \"kubernetes.io/projected/ff826bac-310b-4e92-a5c9-7c0fd6a2fbb6-kube-api-access-gsdw8\") pod \"crc-debug-tgchk\" (UID: \"ff826bac-310b-4e92-a5c9-7c0fd6a2fbb6\") " pod="openshift-must-gather-8djz2/crc-debug-tgchk" Dec 13 07:39:25 crc kubenswrapper[5048]: I1213 07:39:25.952796 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ff826bac-310b-4e92-a5c9-7c0fd6a2fbb6-host\") pod \"crc-debug-tgchk\" (UID: \"ff826bac-310b-4e92-a5c9-7c0fd6a2fbb6\") " pod="openshift-must-gather-8djz2/crc-debug-tgchk" Dec 13 07:39:25 crc kubenswrapper[5048]: I1213 07:39:25.968101 5048 scope.go:117] "RemoveContainer" containerID="092c56d82f2b7003358dc49e20ffa6777ab6134c4f98ef1008b6f60adf31403f" Dec 13 07:39:25 crc kubenswrapper[5048]: I1213 07:39:25.968300 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-8djz2/crc-debug-zxr6j" Dec 13 07:39:26 crc kubenswrapper[5048]: I1213 07:39:26.281683 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gsdw8\" (UniqueName: \"kubernetes.io/projected/ff826bac-310b-4e92-a5c9-7c0fd6a2fbb6-kube-api-access-gsdw8\") pod \"crc-debug-tgchk\" (UID: \"ff826bac-310b-4e92-a5c9-7c0fd6a2fbb6\") " pod="openshift-must-gather-8djz2/crc-debug-tgchk" Dec 13 07:39:26 crc kubenswrapper[5048]: I1213 07:39:26.539001 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-8djz2/crc-debug-tgchk" Dec 13 07:39:26 crc kubenswrapper[5048]: W1213 07:39:26.565643 5048 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podff826bac_310b_4e92_a5c9_7c0fd6a2fbb6.slice/crio-ffacc1546d6bb8172e63fb39e81724a9c02299c4659e4ddfc195223f1f8e9d1c WatchSource:0}: Error finding container ffacc1546d6bb8172e63fb39e81724a9c02299c4659e4ddfc195223f1f8e9d1c: Status 404 returned error can't find the container with id ffacc1546d6bb8172e63fb39e81724a9c02299c4659e4ddfc195223f1f8e9d1c Dec 13 07:39:26 crc kubenswrapper[5048]: I1213 07:39:26.583315 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c3182889-83cf-4ec6-a4b2-387bab6f5dcb" path="/var/lib/kubelet/pods/c3182889-83cf-4ec6-a4b2-387bab6f5dcb/volumes" Dec 13 07:39:26 crc kubenswrapper[5048]: I1213 07:39:26.980334 5048 generic.go:334] "Generic (PLEG): container finished" podID="ff826bac-310b-4e92-a5c9-7c0fd6a2fbb6" containerID="6092d65a7c7c9297d9081edf800268a58655881c20955954ab0e61de74b24b5b" exitCode=0 Dec 13 07:39:26 crc kubenswrapper[5048]: I1213 07:39:26.980368 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-8djz2/crc-debug-tgchk" event={"ID":"ff826bac-310b-4e92-a5c9-7c0fd6a2fbb6","Type":"ContainerDied","Data":"6092d65a7c7c9297d9081edf800268a58655881c20955954ab0e61de74b24b5b"} Dec 13 07:39:26 crc kubenswrapper[5048]: I1213 07:39:26.980392 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-8djz2/crc-debug-tgchk" event={"ID":"ff826bac-310b-4e92-a5c9-7c0fd6a2fbb6","Type":"ContainerStarted","Data":"ffacc1546d6bb8172e63fb39e81724a9c02299c4659e4ddfc195223f1f8e9d1c"} Dec 13 07:39:27 crc kubenswrapper[5048]: I1213 07:39:27.023632 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-8djz2/crc-debug-tgchk"] Dec 13 07:39:27 crc kubenswrapper[5048]: I1213 07:39:27.035509 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-8djz2/crc-debug-tgchk"] Dec 13 07:39:28 crc kubenswrapper[5048]: I1213 07:39:28.263176 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-8djz2/crc-debug-tgchk"
Dec 13 07:39:28 crc kubenswrapper[5048]: I1213 07:39:28.304812 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gsdw8\" (UniqueName: \"kubernetes.io/projected/ff826bac-310b-4e92-a5c9-7c0fd6a2fbb6-kube-api-access-gsdw8\") pod \"ff826bac-310b-4e92-a5c9-7c0fd6a2fbb6\" (UID: \"ff826bac-310b-4e92-a5c9-7c0fd6a2fbb6\") "
Dec 13 07:39:28 crc kubenswrapper[5048]: I1213 07:39:28.305070 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ff826bac-310b-4e92-a5c9-7c0fd6a2fbb6-host\") pod \"ff826bac-310b-4e92-a5c9-7c0fd6a2fbb6\" (UID: \"ff826bac-310b-4e92-a5c9-7c0fd6a2fbb6\") "
Dec 13 07:39:28 crc kubenswrapper[5048]: I1213 07:39:28.305851 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ff826bac-310b-4e92-a5c9-7c0fd6a2fbb6-host" (OuterVolumeSpecName: "host") pod "ff826bac-310b-4e92-a5c9-7c0fd6a2fbb6" (UID: "ff826bac-310b-4e92-a5c9-7c0fd6a2fbb6"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 13 07:39:28 crc kubenswrapper[5048]: I1213 07:39:28.311422 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff826bac-310b-4e92-a5c9-7c0fd6a2fbb6-kube-api-access-gsdw8" (OuterVolumeSpecName: "kube-api-access-gsdw8") pod "ff826bac-310b-4e92-a5c9-7c0fd6a2fbb6" (UID: "ff826bac-310b-4e92-a5c9-7c0fd6a2fbb6"). InnerVolumeSpecName "kube-api-access-gsdw8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 13 07:39:28 crc kubenswrapper[5048]: I1213 07:39:28.407154 5048 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ff826bac-310b-4e92-a5c9-7c0fd6a2fbb6-host\") on node \"crc\" DevicePath \"\""
Dec 13 07:39:28 crc kubenswrapper[5048]: I1213 07:39:28.407472 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gsdw8\" (UniqueName: \"kubernetes.io/projected/ff826bac-310b-4e92-a5c9-7c0fd6a2fbb6-kube-api-access-gsdw8\") on node \"crc\" DevicePath \"\""
Dec 13 07:39:28 crc kubenswrapper[5048]: I1213 07:39:28.577657 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff826bac-310b-4e92-a5c9-7c0fd6a2fbb6" path="/var/lib/kubelet/pods/ff826bac-310b-4e92-a5c9-7c0fd6a2fbb6/volumes"
Dec 13 07:39:29 crc kubenswrapper[5048]: I1213 07:39:29.000787 5048 scope.go:117] "RemoveContainer" containerID="6092d65a7c7c9297d9081edf800268a58655881c20955954ab0e61de74b24b5b"
Dec 13 07:39:29 crc kubenswrapper[5048]: I1213 07:39:29.000867 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-8djz2/crc-debug-tgchk"
Dec 13 07:39:46 crc kubenswrapper[5048]: I1213 07:39:46.215401 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 13 07:39:46 crc kubenswrapper[5048]: I1213 07:39:46.216059 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 13 07:39:46 crc kubenswrapper[5048]: I1213 07:39:46.216137 5048 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-j7hns"
Dec 13 07:39:46 crc kubenswrapper[5048]: I1213 07:39:46.217156 5048 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"39970abd8e25b0365871d71368dd87c07faf6d5fd36ea59cdd3743f467d385a2"} pod="openshift-machine-config-operator/machine-config-daemon-j7hns" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Dec 13 07:39:46 crc kubenswrapper[5048]: I1213 07:39:46.217253 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" containerID="cri-o://39970abd8e25b0365871d71368dd87c07faf6d5fd36ea59cdd3743f467d385a2" gracePeriod=600
Dec 13 07:39:47 crc kubenswrapper[5048]: I1213 07:39:47.164847 5048 generic.go:334] "Generic (PLEG): container finished" podID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerID="39970abd8e25b0365871d71368dd87c07faf6d5fd36ea59cdd3743f467d385a2" exitCode=0
Dec 13 07:39:47 crc kubenswrapper[5048]: I1213 07:39:47.164976 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" event={"ID":"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b","Type":"ContainerDied","Data":"39970abd8e25b0365871d71368dd87c07faf6d5fd36ea59cdd3743f467d385a2"}
Dec 13 07:39:47 crc kubenswrapper[5048]: I1213 07:39:47.165937 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" event={"ID":"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b","Type":"ContainerStarted","Data":"a7f376f7406108d393edb2cde004c6bfe176535074260353313082165961a8b5"}
Dec 13 07:39:47 crc kubenswrapper[5048]: I1213 07:39:47.165995 5048 scope.go:117] "RemoveContainer" containerID="816d7f6445997a7cd4a07b0fe58ad5d757b1c38f7ec8f2c73ef48fbeaa2dc8c7"
Dec 13 07:39:49 crc kubenswrapper[5048]: I1213 07:39:49.956050 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-94459c6fd-6dkbd_dca6704a-bfa6-42db-9692-f6b21a2c9e08/barbican-api/0.log"
Dec 13 07:39:50 crc kubenswrapper[5048]: I1213 07:39:50.090837 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-94459c6fd-6dkbd_dca6704a-bfa6-42db-9692-f6b21a2c9e08/barbican-api-log/0.log"
Dec 13 07:39:50 crc kubenswrapper[5048]: I1213 07:39:50.227945 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-7dd9cf6646-xj92w_ff1df1f6-e2a6-4e70-b352-f3b15b9255d7/barbican-keystone-listener/0.log"
Dec 13 07:39:50 crc kubenswrapper[5048]: I1213 07:39:50.242016 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-7dd9cf6646-xj92w_ff1df1f6-e2a6-4e70-b352-f3b15b9255d7/barbican-keystone-listener-log/0.log"
Dec 13 07:39:50 crc kubenswrapper[5048]: I1213 07:39:50.366124 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-7888dc9665-xrmff_ec726365-6964-4e11-942d-d57482573f01/barbican-worker/0.log"
Dec 13 07:39:50 crc kubenswrapper[5048]: I1213 07:39:50.404245 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-7888dc9665-xrmff_ec726365-6964-4e11-942d-d57482573f01/barbican-worker-log/0.log"
Dec 13 07:39:50 crc kubenswrapper[5048]: I1213 07:39:50.585904 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_a6d85513-7b75-40e7-9eae-08544cccbc55/ceilometer-central-agent/0.log"
Dec 13 07:39:50 crc kubenswrapper[5048]: I1213 07:39:50.644741 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-4flcx_868a604c-3a79-4945-b54e-950797bed05d/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 13 07:39:50 crc kubenswrapper[5048]: I1213 07:39:50.700661 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_a6d85513-7b75-40e7-9eae-08544cccbc55/ceilometer-notification-agent/0.log"
Dec 13 07:39:50 crc kubenswrapper[5048]: I1213 07:39:50.754579 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_a6d85513-7b75-40e7-9eae-08544cccbc55/sg-core/0.log"
Dec 13 07:39:50 crc kubenswrapper[5048]: I1213 07:39:50.773122 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_a6d85513-7b75-40e7-9eae-08544cccbc55/proxy-httpd/0.log"
Dec 13 07:39:50 crc kubenswrapper[5048]: I1213 07:39:50.931022 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_2341822f-d44f-47a2-a543-655dc0b26866/cinder-api-log/0.log"
Dec 13 07:39:50 crc kubenswrapper[5048]: I1213 07:39:50.944055 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_2341822f-d44f-47a2-a543-655dc0b26866/cinder-api/0.log"
Dec 13 07:39:51 crc kubenswrapper[5048]: I1213 07:39:51.133360 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_e215aafd-55f0-449e-9886-fa2b93d7fd83/cinder-scheduler/0.log"
Dec 13 07:39:51 crc kubenswrapper[5048]: I1213 07:39:51.148619 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_e215aafd-55f0-449e-9886-fa2b93d7fd83/probe/0.log"
Dec 13 07:39:51 crc kubenswrapper[5048]: I1213 07:39:51.235541 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-vhsrc_7df2d78c-502d-4d7c-9233-cf01992cab77/configure-network-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 13 07:39:51 crc kubenswrapper[5048]: I1213 07:39:51.357023 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-wcwx7_cfd8e6ef-8724-4363-8a56-71f2b0f24f15/configure-os-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 13 07:39:51 crc kubenswrapper[5048]: I1213 07:39:51.457700 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-78c64bc9c5-zws2g_40c89533-f85e-4c8f-9826-f1affe855947/init/0.log"
Dec 13 07:39:51 crc kubenswrapper[5048]: I1213 07:39:51.621169 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-78c64bc9c5-zws2g_40c89533-f85e-4c8f-9826-f1affe855947/init/0.log"
Dec 13 07:39:51 crc kubenswrapper[5048]: I1213 07:39:51.671856 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-j46dh_78eb5a3b-7802-4507-bb35-37cc2e8edb56/download-cache-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 13 07:39:51 crc kubenswrapper[5048]: I1213 07:39:51.675035 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-78c64bc9c5-zws2g_40c89533-f85e-4c8f-9826-f1affe855947/dnsmasq-dns/0.log"
Dec 13 07:39:51 crc kubenswrapper[5048]: I1213 07:39:51.833295 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_c33951ff-f856-420b-90e1-6f776931b17e/glance-httpd/0.log"
Dec 13 07:39:51 crc kubenswrapper[5048]: I1213 07:39:51.869633 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_c33951ff-f856-420b-90e1-6f776931b17e/glance-log/0.log"
Dec 13 07:39:52 crc kubenswrapper[5048]: I1213 07:39:52.025577 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_be68c40d-a83c-40f4-ab3b-4f50f64aae15/glance-httpd/0.log"
Dec 13 07:39:52 crc kubenswrapper[5048]: I1213 07:39:52.048348 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_be68c40d-a83c-40f4-ab3b-4f50f64aae15/glance-log/0.log"
Dec 13 07:39:52 crc kubenswrapper[5048]: I1213 07:39:52.191752 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-657fc95f76-vznd4_1a49463c-d974-4631-b6ef-3f88d734ac2d/horizon/0.log"
Dec 13 07:39:52 crc kubenswrapper[5048]: I1213 07:39:52.350969 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-r8rlz_a6c582a0-39fc-4d6c-aa84-367f11c30ff1/install-certs-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 13 07:39:52 crc kubenswrapper[5048]: I1213 07:39:52.540731 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-5bk98_58f90e59-a2c6-4099-b5eb-6a35c0448a1f/install-os-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 13 07:39:52 crc kubenswrapper[5048]: I1213 07:39:52.653738 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-657fc95f76-vznd4_1a49463c-d974-4631-b6ef-3f88d734ac2d/horizon-log/0.log"
Dec 13 07:39:52 crc kubenswrapper[5048]: I1213 07:39:52.793768 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-5c89574bc9-gw9cl_d339d78d-798f-4147-85f3-87e7a05515dc/keystone-api/0.log"
Dec 13 07:39:52 crc kubenswrapper[5048]: I1213 07:39:52.852554 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29426821-gd94t_6748fa72-aa27-4717-9434-2a06950c519a/keystone-cron/0.log"
Dec 13 07:39:52 crc kubenswrapper[5048]: I1213 07:39:52.960367 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_c1e7f131-1d2e-41de-8cba-e54b383324c5/kube-state-metrics/0.log"
Dec 13 07:39:53 crc kubenswrapper[5048]: I1213 07:39:53.088625 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-47k9k_482402ba-adeb-4175-911a-2ab863e44d4e/libvirt-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 13 07:39:53 crc kubenswrapper[5048]: I1213 07:39:53.463642 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-6465fd554f-k9lkr_4a6223fc-c3a5-462d-b61d-ebd353bbe7ca/neutron-api/0.log"
Dec 13 07:39:53 crc kubenswrapper[5048]: I1213 07:39:53.467991 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-6465fd554f-k9lkr_4a6223fc-c3a5-462d-b61d-ebd353bbe7ca/neutron-httpd/0.log"
Dec 13 07:39:53 crc kubenswrapper[5048]: I1213 07:39:53.734069 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-rcvjb_967349d1-5d27-480d-8e31-2eaa33e3c7e0/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 13 07:39:54 crc kubenswrapper[5048]: I1213 07:39:54.181495 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_1e4cb8ce-de40-4d30-beb4-fa5a0dae4e1e/nova-cell0-conductor-conductor/0.log"
Dec 13 07:39:54 crc kubenswrapper[5048]: I1213 07:39:54.307262 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_40f60c37-7e8e-4a30-9d38-26296975a60c/nova-api-log/0.log"
Dec 13 07:39:54 crc kubenswrapper[5048]: I1213 07:39:54.646475 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_40f60c37-7e8e-4a30-9d38-26296975a60c/nova-api-api/0.log"
Dec 13 07:39:54 crc kubenswrapper[5048]: I1213 07:39:54.648837 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_219c43b9-12af-4e67-9c5e-93b2e83623b1/nova-cell1-conductor-conductor/0.log"
Dec 13 07:39:54 crc kubenswrapper[5048]: I1213 07:39:54.686066 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_6646f183-ffd9-4870-a202-85003939acd6/nova-cell1-novncproxy-novncproxy/0.log"
Dec 13 07:39:54 crc kubenswrapper[5048]: I1213 07:39:54.931981 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-flsxx_e460e258-aa7f-4839-9443-50b9afe4557b/nova-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 13 07:39:54 crc kubenswrapper[5048]: I1213 07:39:54.999147 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_b9502006-ca4a-4a71-90ef-4f86311c70fc/nova-metadata-log/0.log"
Dec 13 07:39:55 crc kubenswrapper[5048]: I1213 07:39:55.345736 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_be46506c-41d0-4b9f-92bd-f34eb6d6a1aa/mysql-bootstrap/0.log"
Dec 13 07:39:55 crc kubenswrapper[5048]: I1213 07:39:55.394711 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_bf17e7ba-3f68-4aca-ae8e-fb82f10f18d4/nova-scheduler-scheduler/0.log"
Dec 13 07:39:55 crc kubenswrapper[5048]: I1213 07:39:55.593718 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_be46506c-41d0-4b9f-92bd-f34eb6d6a1aa/galera/0.log"
Dec 13 07:39:55 crc kubenswrapper[5048]: I1213 07:39:55.605903 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_be46506c-41d0-4b9f-92bd-f34eb6d6a1aa/mysql-bootstrap/0.log"
Dec 13 07:39:55 crc kubenswrapper[5048]: I1213 07:39:55.799461 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_a5870a17-c845-46b8-a25c-8f8822a93cb8/mysql-bootstrap/0.log"
Dec 13 07:39:56 crc kubenswrapper[5048]: I1213 07:39:56.006286 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_a5870a17-c845-46b8-a25c-8f8822a93cb8/galera/0.log"
Dec 13 07:39:56 crc kubenswrapper[5048]: I1213 07:39:56.006859 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_a5870a17-c845-46b8-a25c-8f8822a93cb8/mysql-bootstrap/0.log"
Dec 13 07:39:56 crc kubenswrapper[5048]: I1213 07:39:56.509737 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_b9502006-ca4a-4a71-90ef-4f86311c70fc/nova-metadata-metadata/0.log"
Dec 13 07:39:56 crc kubenswrapper[5048]: I1213 07:39:56.741643 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_c0fd6a3a-5c9b-4d74-bfba-719758182b08/openstackclient/0.log"
Dec 13 07:39:56 crc kubenswrapper[5048]: I1213 07:39:56.880357 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-2sh28_a8258a39-dbbb-4672-9d88-22749f0c9563/ovn-controller/0.log"
Dec 13 07:39:56 crc kubenswrapper[5048]: I1213 07:39:56.985586 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-l4wtt_0da0172c-5a76-44fc-8ff1-e694ba1e083b/openstack-network-exporter/0.log"
Dec 13 07:39:57 crc kubenswrapper[5048]: I1213 07:39:57.125220 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-x5dfv_df1e9604-d8b9-4e08-8eb1-9c30b73f6d70/ovsdb-server-init/0.log"
Dec 13 07:39:57 crc kubenswrapper[5048]: I1213 07:39:57.284266 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-x5dfv_df1e9604-d8b9-4e08-8eb1-9c30b73f6d70/ovsdb-server-init/0.log"
Dec 13 07:39:57 crc kubenswrapper[5048]: I1213 07:39:57.291791 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-x5dfv_df1e9604-d8b9-4e08-8eb1-9c30b73f6d70/ovs-vswitchd/0.log"
Dec 13 07:39:57 crc kubenswrapper[5048]: I1213 07:39:57.313110 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-x5dfv_df1e9604-d8b9-4e08-8eb1-9c30b73f6d70/ovsdb-server/0.log"
Dec 13 07:39:57 crc kubenswrapper[5048]: I1213 07:39:57.518561 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-s7hbt_c0376236-384b-44b9-abbb-a1fe41557a88/ovn-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 13 07:39:57 crc kubenswrapper[5048]: I1213 07:39:57.528271 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_3f002b19-41eb-4af1-a6c1-a4639e81417e/openstack-network-exporter/0.log"
Dec 13 07:39:57 crc kubenswrapper[5048]: I1213 07:39:57.630922 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_3f002b19-41eb-4af1-a6c1-a4639e81417e/ovn-northd/0.log"
Dec 13 07:39:57 crc kubenswrapper[5048]: I1213 07:39:57.829569 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_8c7d8f14-b731-4408-b506-2dac81b2a0a7/openstack-network-exporter/0.log"
Dec 13 07:39:57 crc kubenswrapper[5048]: I1213 07:39:57.902986 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_8c7d8f14-b731-4408-b506-2dac81b2a0a7/ovsdbserver-nb/0.log"
Dec 13 07:39:58 crc kubenswrapper[5048]: I1213 07:39:58.438273 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_932081d6-9a70-45ec-8738-f0b1265a2a84/ovsdbserver-sb/0.log"
Dec 13 07:39:58 crc kubenswrapper[5048]: I1213 07:39:58.455039 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_932081d6-9a70-45ec-8738-f0b1265a2a84/openstack-network-exporter/0.log"
Dec 13 07:39:58 crc kubenswrapper[5048]: I1213 07:39:58.573738 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-7b9cc68dcb-22pkt_fb3e7c2a-93fb-4bf8-8447-6d4be22a0760/placement-api/0.log"
Dec 13 07:39:58 crc kubenswrapper[5048]: I1213 07:39:58.731142 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-7b9cc68dcb-22pkt_fb3e7c2a-93fb-4bf8-8447-6d4be22a0760/placement-log/0.log"
Dec 13 07:39:58 crc kubenswrapper[5048]: I1213 07:39:58.828286 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_b6586a43-004c-41f4-9172-b3b385849341/setup-container/0.log"
Dec 13 07:39:58 crc kubenswrapper[5048]: I1213 07:39:58.949517 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_b6586a43-004c-41f4-9172-b3b385849341/rabbitmq/0.log"
Dec 13 07:39:58 crc kubenswrapper[5048]: I1213 07:39:58.982983 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_b6586a43-004c-41f4-9172-b3b385849341/setup-container/0.log"
Dec 13 07:39:59 crc kubenswrapper[5048]: I1213 07:39:59.069277 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_cd2c9077-4969-4d54-a677-2f84128c1a13/setup-container/0.log"
Dec 13 07:39:59 crc kubenswrapper[5048]: I1213 07:39:59.260742 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_cd2c9077-4969-4d54-a677-2f84128c1a13/setup-container/0.log"
Dec 13 07:39:59 crc kubenswrapper[5048]: I1213 07:39:59.294810 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_cd2c9077-4969-4d54-a677-2f84128c1a13/rabbitmq/0.log"
Dec 13 07:39:59 crc kubenswrapper[5048]: I1213 07:39:59.309521 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-mpgnb_360c2d75-bc2a-408a-bfa8-4c250e32d6ab/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 13 07:39:59 crc kubenswrapper[5048]: I1213 07:39:59.475029 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-rrx8r_db18a520-b418-4ee9-bed9-d72c023c9959/redhat-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 13 07:39:59 crc kubenswrapper[5048]: I1213 07:39:59.556558 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-2tt6c_29c60e6c-e671-43eb-ad63-2ccf40ef5719/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 13 07:39:59 crc kubenswrapper[5048]: I1213 07:39:59.732813 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-42g5n_bc147090-461f-4896-a08e-59dddc7c14cc/run-os-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 13 07:39:59 crc kubenswrapper[5048]: I1213 07:39:59.811792 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-mwrz4_a097705b-8b45-470f-8026-744ebdc4083a/ssh-known-hosts-edpm-deployment/0.log"
Dec 13 07:39:59 crc kubenswrapper[5048]: I1213 07:39:59.977990 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-6c7b8f495-lq789_9d13568e-517b-46ae-b3bd-dfa6ee7b671a/proxy-server/0.log"
Dec 13 07:40:00 crc kubenswrapper[5048]: I1213 07:40:00.106717 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-6c7b8f495-lq789_9d13568e-517b-46ae-b3bd-dfa6ee7b671a/proxy-httpd/0.log"
Dec 13 07:40:00 crc kubenswrapper[5048]: I1213 07:40:00.156288 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-xs94r_76f71d51-d887-428e-bcf0-e07a75cda134/swift-ring-rebalance/0.log"
Dec 13 07:40:00 crc kubenswrapper[5048]: I1213 07:40:00.280254 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4/account-auditor/0.log"
Dec 13 07:40:00 crc kubenswrapper[5048]: I1213 07:40:00.334931 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4/account-reaper/0.log"
Dec 13 07:40:00 crc kubenswrapper[5048]: I1213 07:40:00.453793 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4/account-replicator/0.log"
Dec 13 07:40:00 crc kubenswrapper[5048]: I1213 07:40:00.491003 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4/container-auditor/0.log"
Dec 13 07:40:00 crc kubenswrapper[5048]: I1213 07:40:00.509515 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4/account-server/0.log"
Dec 13 07:40:00 crc kubenswrapper[5048]: I1213 07:40:00.566482 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4/container-replicator/0.log"
Dec 13 07:40:00 crc kubenswrapper[5048]: I1213 07:40:00.650850 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4/container-server/0.log"
Dec 13 07:40:00 crc kubenswrapper[5048]: I1213 07:40:00.729281 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4/container-updater/0.log"
Dec 13 07:40:00 crc kubenswrapper[5048]: I1213 07:40:00.735379 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4/object-auditor/0.log"
Dec 13 07:40:00 crc kubenswrapper[5048]: I1213 07:40:00.781738 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4/object-expirer/0.log"
Dec 13 07:40:00 crc kubenswrapper[5048]: I1213 07:40:00.889928 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4/object-replicator/0.log"
Dec 13 07:40:00 crc kubenswrapper[5048]: I1213 07:40:00.940476 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4/object-updater/0.log"
Dec 13 07:40:00 crc kubenswrapper[5048]: I1213 07:40:00.955996 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4/object-server/0.log"
Dec 13 07:40:01 crc kubenswrapper[5048]: I1213 07:40:01.061383 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4/rsync/0.log"
Dec 13 07:40:01 crc kubenswrapper[5048]: I1213 07:40:01.115676 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7f3766b-7c8f-43ed-bf8e-4cd4f6cbe2a4/swift-recon-cron/0.log"
Dec 13 07:40:01 crc kubenswrapper[5048]: I1213 07:40:01.198963 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-gcjqf_13b74976-c0e2-4461-a564-de6ce88aa549/telemetry-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 13 07:40:01 crc kubenswrapper[5048]: I1213 07:40:01.318304 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_b1419e46-ce93-452b-9d88-b9a50e9dbfe6/tempest-tests-tempest-tests-runner/0.log"
Dec 13 07:40:01 crc kubenswrapper[5048]: I1213 07:40:01.418492 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_036a76e2-0363-4fe5-98fc-283eea6536e6/test-operator-logs-container/0.log"
Dec 13 07:40:01 crc kubenswrapper[5048]: I1213 07:40:01.560282 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-pw7g5_6a5d7f38-72b9-4092-a948-c775ce64d40c/validate-network-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 13 07:40:11 crc kubenswrapper[5048]: I1213 07:40:11.481853 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_c468ccc6-8384-4a69-ae41-cca18f9233e3/memcached/0.log"
Dec 13 07:40:29 crc kubenswrapper[5048]: I1213 07:40:29.965529 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc_35d3bce1-ed03-4919-b1eb-09b5a630a578/util/0.log"
Dec 13 07:40:30 crc kubenswrapper[5048]: I1213 07:40:30.142154 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc_35d3bce1-ed03-4919-b1eb-09b5a630a578/pull/0.log"
Dec 13 07:40:30 crc kubenswrapper[5048]: I1213 07:40:30.176760 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc_35d3bce1-ed03-4919-b1eb-09b5a630a578/util/0.log"
Dec 13 07:40:30 crc kubenswrapper[5048]: I1213 07:40:30.190511 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc_35d3bce1-ed03-4919-b1eb-09b5a630a578/pull/0.log"
Dec 13 07:40:30 crc kubenswrapper[5048]: I1213 07:40:30.344016 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc_35d3bce1-ed03-4919-b1eb-09b5a630a578/pull/0.log"
Dec 13 07:40:30 crc kubenswrapper[5048]: I1213 07:40:30.352315 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc_35d3bce1-ed03-4919-b1eb-09b5a630a578/util/0.log"
Dec 13 07:40:30 crc kubenswrapper[5048]: I1213 07:40:30.362867 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_57226e5b57de4f342b8572ffc23f8bfe592719adaf06c04e6c86cf630fsrpkc_35d3bce1-ed03-4919-b1eb-09b5a630a578/extract/0.log"
Dec 13 07:40:30 crc kubenswrapper[5048]: I1213 07:40:30.620330 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-5cf45c46bd-mblxt_6d515ed0-b2e1-469e-a7c5-bbe62664979e/manager/0.log"
Dec 13 07:40:30 crc kubenswrapper[5048]: I1213 07:40:30.650817 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-95949466-xmb6d_50877cb8-07a0-48c4-af3d-72144fa836e0/manager/0.log"
Dec 13 07:40:30 crc kubenswrapper[5048]: I1213 07:40:30.918854 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-66f8b87655-ll9d4_83f0165e-ed2b-436c-9ae0-e871bd291638/manager/0.log"
Dec 13 07:40:30 crc kubenswrapper[5048]: I1213 07:40:30.923711 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-767f9d7567-md4jk_4d953dd1-d113-4dc6-a80b-3ded9d08b476/manager/0.log"
Dec 13 07:40:31 crc kubenswrapper[5048]: I1213 07:40:31.090415 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-59b8dcb766-sn85z_07f258ba-5ff1-4d34-8e07-62c024c15dba/manager/0.log"
Dec 13 07:40:31 crc kubenswrapper[5048]: I1213 07:40:31.123892 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-6ccf486b9-g6ggd_9bd1a72e-ad49-46ae-a748-a21d05114b84/manager/0.log"
Dec 13 07:40:31 crc kubenswrapper[5048]: I1213 07:40:31.367115 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-f458558d7-xxj48_b47e0c4b-dd06-4d55-b3d7-8e3d968df8e6/manager/0.log"
Dec 13 07:40:31 crc kubenswrapper[5048]: I1213 07:40:31.494668 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-7cf9bd88b6-wswvl_d1a74609-2a57-44a5-8e88-dab8ae7fba98/manager/0.log"
Dec 13 07:40:31 crc kubenswrapper[5048]: I1213 07:40:31.618414 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-5c7cbf548f-fdf2l_148d1633-90ad-45da-af0b-5b182ee41795/manager/0.log"
Dec 13 07:40:31 crc kubenswrapper[5048]: I1213 07:40:31.622037 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5fdd9786f7-mkjnl_c58196eb-9b95-450a-98ff-d852ff7125c5/manager/0.log"
Dec 13 07:40:31 crc kubenswrapper[5048]: I1213 07:40:31.824921 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-f76f4954c-bpgwj_1994b961-1801-4279-8c61-a901803b4a3a/manager/0.log"
Dec 13 07:40:31 crc kubenswrapper[5048]: I1213 07:40:31.870630 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-7cd87b778f-zgrtn_72736371-ff33-45e1-a685-9e2f89dcec60/manager/0.log"
Dec 13 07:40:32 crc kubenswrapper[5048]: I1213 07:40:32.075673 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-68c649d9d-gqsj6_659a6b7b-7ef9-4fc2-8ea4-4298020aa94c/manager/0.log"
Dec 13 07:40:32 crc kubenswrapper[5048]: I1213 07:40:32.084634 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-5fbbf8b6cc-fklqp_201cc161-1f13-498b-b99a-0d9c91bdc15a/manager/0.log"
Dec 13 07:40:32 crc kubenswrapper[5048]: I1213 07:40:32.256115 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-7b67c7f6c5c7878_db14d0ba-f68b-48a5-b69a-97399548fca1/manager/0.log"
Dec 13 07:40:32 crc kubenswrapper[5048]: I1213 07:40:32.686450 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-576c4d554c-lf2gp_46a5447f-e917-4c59-8735-db6e8dce1527/operator/0.log"
Dec 13 07:40:32 crc kubenswrapper[5048]: I1213 07:40:32.758552 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-qlml5_60f70325-e4e1-4fdc-ba21-c92b6ed5967e/registry-server/0.log"
Dec 13 07:40:33 crc kubenswrapper[5048]: I1213 07:40:33.034198 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-bf6d4f946-4ck8m_859bbc21-6f39-4712-a04f-4473b78b32eb/manager/0.log"
Dec 13 07:40:33 crc kubenswrapper[5048]: I1213 07:40:33.112535 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-8665b56d78-g782d_6d4d57f0-ff75-455d-8e8a-fe4947b3ee40/manager/0.log"
Dec 13 07:40:33 crc kubenswrapper[5048]: I1213 07:40:33.247746 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-fvxqc_4b1fbe4c-27c1-4456-bf01-6a42320cb63d/operator/0.log"
Dec 13 07:40:33 crc kubenswrapper[5048]: I1213 07:40:33.373284 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-58d7cfb75d-nsmnm_fea58380-4304-485b-aefc-48f9baea4126/manager/0.log"
Dec 13 07:40:33 crc kubenswrapper[5048]: I1213 07:40:33.484338 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-5c6df8f9-sx7sf_813f57d5-1063-4dbc-9847-b6ea97e46fbe/manager/0.log"
Dec 13 07:40:33 crc kubenswrapper[5048]: I1213 07:40:33.608539 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-97d456b9-2bg97_89f4ffab-8c61-4389-8f41-43cd8e2d54de/manager/0.log"
Dec 13 07:40:33 crc kubenswrapper[5048]: I1213 07:40:33.695120 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-756ccf86c7-dv9ww_a046de13-f7f7-4d7c-abf3-79ed8cc60fad/manager/0.log"
Dec 13 07:40:33 crc kubenswrapper[5048]: I1213 07:40:33.765039 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-55f78b7c4c-b4f6l_7165a29e-88bf-4194-bffa-414a675d1be5/manager/0.log"
Dec 13 07:40:54 crc kubenswrapper[5048]: I1213 07:40:54.832161 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-x5sc8_81dcc1e9-b7ab-4d9e-8a4a-6bdc087877a4/kube-rbac-proxy/0.log"
Dec 13 07:40:54 crc kubenswrapper[5048]: I1213 07:40:54.859770 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-l5kdh_cdcbb504-e2d6-4511-bf24-d18ba641f45b/control-plane-machine-set-operator/0.log"
Dec 13 07:40:54 crc kubenswrapper[5048]: I1213 07:40:54.981322 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-x5sc8_81dcc1e9-b7ab-4d9e-8a4a-6bdc087877a4/machine-api-operator/0.log"
Dec 13 07:41:08 crc kubenswrapper[5048]: I1213 07:41:08.986982 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-tb8lp_fe89c6cd-2d34-47e7-9ff7-cba95295e680/cert-manager-controller/0.log"
Dec 13 07:41:09 crc kubenswrapper[5048]: I1213 07:41:09.195416 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-m5zdt_2a9b7a7a-7741-4f30-8e7f-a9475784f796/cert-manager-webhook/0.log"
Dec 13 07:41:09 crc kubenswrapper[5048]: I1213 07:41:09.206143 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-v5shf_6a17b3c4-050d-40b0-82d8-d9208365e261/cert-manager-cainjector/0.log"
Dec 13 07:41:23 crc kubenswrapper[5048]: I1213 07:41:23.831113 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-6ff7998486-ppsbf_85ef8f1d-8549-4a84-a38c-aefa9e4e5583/nmstate-console-plugin/0.log"
Dec 13 07:41:24 crc kubenswrapper[5048]: I1213 07:41:24.059720 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-b4xzd_d4103fb8-a0f4-4b91-b08a-6047a1f4df6e/nmstate-handler/0.log"
Dec 13 07:41:24 crc kubenswrapper[5048]: I1213 07:41:24.066917 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f7f7578db-wvmhg_4fe5b93a-b962-4feb-85cf-1210c428f7f6/kube-rbac-proxy/0.log"
Dec 13 07:41:24 crc kubenswrapper[5048]: I1213 07:41:24.081068 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f7f7578db-wvmhg_4fe5b93a-b962-4feb-85cf-1210c428f7f6/nmstate-metrics/0.log"
Dec 13 07:41:24 crc kubenswrapper[5048]: I1213 07:41:24.279196 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-6769fb99d-lk9l5_ce50f1e5-0177-4f3d-a02a-5af653f70001/nmstate-operator/0.log"
Dec 13 07:41:24 crc kubenswrapper[5048]: I1213 07:41:24.294894 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-f8fb84555-bswmw_a6abecb0-a854-4dbb-9fb0-5ba03e64daae/nmstate-webhook/0.log"
Dec 13 07:41:41 crc kubenswrapper[5048]: I1213 07:41:41.636343 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-5bddd4b946-x8wnx_2adbe773-2087-440f-bcdd-91ccb9eaa03f/kube-rbac-proxy/0.log"
Dec 13 07:41:41 crc kubenswrapper[5048]: I1213 07:41:41.726566 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-5bddd4b946-x8wnx_2adbe773-2087-440f-bcdd-91ccb9eaa03f/controller/0.log"
Dec 13 07:41:41 crc kubenswrapper[5048]: I1213 07:41:41.850507 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rm42c_b6da7b5a-20a7-4721-b245-c63202188d2f/cp-frr-files/0.log"
Dec 13 07:41:42 crc kubenswrapper[5048]: I1213 07:41:42.046508 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rm42c_b6da7b5a-20a7-4721-b245-c63202188d2f/cp-metrics/0.log"
Dec 13 07:41:42 crc kubenswrapper[5048]: I1213 07:41:42.046625 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rm42c_b6da7b5a-20a7-4721-b245-c63202188d2f/cp-reloader/0.log"
Dec 13 07:41:42 crc kubenswrapper[5048]: I1213 07:41:42.097484 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rm42c_b6da7b5a-20a7-4721-b245-c63202188d2f/cp-reloader/0.log"
Dec 13 07:41:42 crc kubenswrapper[5048]: I1213 07:41:42.109704 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rm42c_b6da7b5a-20a7-4721-b245-c63202188d2f/cp-frr-files/0.log"
Dec 13 07:41:42 crc kubenswrapper[5048]: I1213 07:41:42.269031 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rm42c_b6da7b5a-20a7-4721-b245-c63202188d2f/cp-frr-files/0.log"
Dec 13 07:41:42 crc kubenswrapper[5048]: I1213 07:41:42.269076 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rm42c_b6da7b5a-20a7-4721-b245-c63202188d2f/cp-reloader/0.log"
Dec 13 07:41:42 crc kubenswrapper[5048]: I1213 07:41:42.302942 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rm42c_b6da7b5a-20a7-4721-b245-c63202188d2f/cp-metrics/0.log"
Dec 13 07:41:42 crc kubenswrapper[5048]: I1213 07:41:42.328746 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rm42c_b6da7b5a-20a7-4721-b245-c63202188d2f/cp-metrics/0.log"
Dec 13 07:41:42 crc kubenswrapper[5048]: I1213 07:41:42.500684 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rm42c_b6da7b5a-20a7-4721-b245-c63202188d2f/cp-frr-files/0.log"
Dec 13 07:41:42 crc kubenswrapper[5048]: I1213 07:41:42.509450 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rm42c_b6da7b5a-20a7-4721-b245-c63202188d2f/cp-reloader/0.log"
Dec 13 07:41:42 crc kubenswrapper[5048]: I1213 07:41:42.512846 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rm42c_b6da7b5a-20a7-4721-b245-c63202188d2f/cp-metrics/0.log"
Dec 13 07:41:42 crc kubenswrapper[5048]: I1213 07:41:42.543282 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rm42c_b6da7b5a-20a7-4721-b245-c63202188d2f/controller/0.log"
Dec 13 07:41:42 crc kubenswrapper[5048]: I1213 07:41:42.677861 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rm42c_b6da7b5a-20a7-4721-b245-c63202188d2f/kube-rbac-proxy/0.log"
Dec 13 07:41:42 crc kubenswrapper[5048]: I1213 07:41:42.679680 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rm42c_b6da7b5a-20a7-4721-b245-c63202188d2f/frr-metrics/0.log"
Dec 13 07:41:42 crc kubenswrapper[5048]: I1213 07:41:42.741972 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rm42c_b6da7b5a-20a7-4721-b245-c63202188d2f/kube-rbac-proxy-frr/0.log"
Dec 13 07:41:42 crc kubenswrapper[5048]: I1213 07:41:42.957163 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rm42c_b6da7b5a-20a7-4721-b245-c63202188d2f/reloader/0.log"
Dec 13 07:41:42 crc kubenswrapper[5048]: I1213 07:41:42.967154 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7784b6fcf-cwjgt_fb943e2e-85ed-4508-ade6-d16343977d3d/frr-k8s-webhook-server/0.log"
Dec 13 07:41:43 crc kubenswrapper[5048]: I1213 07:41:43.175244 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-955d4d9d6-6w2dd_320194d2-61c0-4149-9458-a880711d4edf/manager/0.log"
Dec 13 07:41:43 crc kubenswrapper[5048]: I1213 07:41:43.785576 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-56bb4c4c65-p9qls_0618bcc5-697d-4f5b-bbab-a84d868b5d32/webhook-server/0.log"
Dec 13 07:41:43 crc kubenswrapper[5048]: I1213 07:41:43.841766 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-fj6dj_3c80ea24-095d-4c79-9328-2b3433da583c/kube-rbac-proxy/0.log"
Dec 13 07:41:43 crc kubenswrapper[5048]: I1213 07:41:43.964643 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rm42c_b6da7b5a-20a7-4721-b245-c63202188d2f/frr/0.log"
Dec 13 07:41:44 crc kubenswrapper[5048]: I1213 07:41:44.348913 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-fj6dj_3c80ea24-095d-4c79-9328-2b3433da583c/speaker/0.log"
Dec 13 07:41:46 crc kubenswrapper[5048]: I1213 07:41:46.215514 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 13 07:41:46 crc kubenswrapper[5048]: I1213 07:41:46.215836 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 13 07:41:57 crc kubenswrapper[5048]: I1213 07:41:57.152477 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6_fe828039-1927-4005-a732-ce1cb2fb898c/util/0.log"
Dec 13 07:41:57 crc kubenswrapper[5048]: I1213 07:41:57.319238 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6_fe828039-1927-4005-a732-ce1cb2fb898c/pull/0.log"
Dec 13 07:41:57 crc kubenswrapper[5048]: I1213 07:41:57.323314 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6_fe828039-1927-4005-a732-ce1cb2fb898c/pull/0.log"
Dec 13 07:41:57 crc kubenswrapper[5048]: I1213 07:41:57.336863 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6_fe828039-1927-4005-a732-ce1cb2fb898c/util/0.log"
Dec 13 07:41:57 crc kubenswrapper[5048]: I1213 07:41:57.473874 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6_fe828039-1927-4005-a732-ce1cb2fb898c/extract/0.log"
Dec 13 07:41:57 crc kubenswrapper[5048]: I1213 07:41:57.476514 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6_fe828039-1927-4005-a732-ce1cb2fb898c/util/0.log"
Dec 13 07:41:57 crc kubenswrapper[5048]: I1213 07:41:57.484027 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d45hrd6_fe828039-1927-4005-a732-ce1cb2fb898c/pull/0.log"
Dec 13 07:41:57 crc kubenswrapper[5048]: I1213 07:41:57.630510 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts_4f04fec8-b2a6-4634-b1af-0c47285bad86/util/0.log"
Dec 13 07:41:57 crc kubenswrapper[5048]: I1213 07:41:57.788057 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts_4f04fec8-b2a6-4634-b1af-0c47285bad86/util/0.log"
Dec 13 07:41:57 crc kubenswrapper[5048]: I1213 07:41:57.805652 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts_4f04fec8-b2a6-4634-b1af-0c47285bad86/pull/0.log"
Dec 13 07:41:57 crc kubenswrapper[5048]: I1213 07:41:57.807283 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts_4f04fec8-b2a6-4634-b1af-0c47285bad86/pull/0.log"
Dec 13 07:41:57 crc kubenswrapper[5048]: I1213 07:41:57.970576 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts_4f04fec8-b2a6-4634-b1af-0c47285bad86/util/0.log"
Dec 13 07:41:57 crc kubenswrapper[5048]: I1213 07:41:57.977261 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts_4f04fec8-b2a6-4634-b1af-0c47285bad86/extract/0.log"
Dec 13 07:41:57 crc kubenswrapper[5048]: I1213 07:41:57.978894 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa84zrts_4f04fec8-b2a6-4634-b1af-0c47285bad86/pull/0.log"
Dec 13 07:41:58 crc kubenswrapper[5048]: I1213 07:41:58.135430 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-g8j5s_07c47b88-4b41-47d9-ae48-feacb3431a47/extract-utilities/0.log"
Dec 13 07:41:58 crc kubenswrapper[5048]: I1213 07:41:58.302625 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-g8j5s_07c47b88-4b41-47d9-ae48-feacb3431a47/extract-content/0.log"
Dec 13 07:41:58 crc kubenswrapper[5048]: I1213 07:41:58.306208 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-g8j5s_07c47b88-4b41-47d9-ae48-feacb3431a47/extract-utilities/0.log"
Dec 13 07:41:58 crc kubenswrapper[5048]: I1213 07:41:58.322622 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-g8j5s_07c47b88-4b41-47d9-ae48-feacb3431a47/extract-content/0.log"
Dec 13 07:41:58 crc kubenswrapper[5048]: I1213 07:41:58.439037 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-g8j5s_07c47b88-4b41-47d9-ae48-feacb3431a47/extract-utilities/0.log"
Dec 13 07:41:58 crc kubenswrapper[5048]: I1213 07:41:58.502067 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-g8j5s_07c47b88-4b41-47d9-ae48-feacb3431a47/extract-content/0.log"
Dec 13 07:41:58 crc kubenswrapper[5048]: I1213 07:41:58.658398 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-9l4zr_405bce54-d1ec-43dc-99f7-9181bb7a0a37/extract-utilities/0.log"
Dec 13 07:41:58 crc kubenswrapper[5048]: I1213 07:41:58.874912 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-9l4zr_405bce54-d1ec-43dc-99f7-9181bb7a0a37/extract-content/0.log"
Dec 13 07:41:58 crc kubenswrapper[5048]: I1213 07:41:58.970612 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-9l4zr_405bce54-d1ec-43dc-99f7-9181bb7a0a37/extract-utilities/0.log"
Dec 13 07:41:58 crc kubenswrapper[5048]: I1213 07:41:58.981054 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-9l4zr_405bce54-d1ec-43dc-99f7-9181bb7a0a37/extract-content/0.log"
Dec 13 07:41:59 crc kubenswrapper[5048]: I1213 07:41:59.202265 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-g8j5s_07c47b88-4b41-47d9-ae48-feacb3431a47/registry-server/0.log"
Dec 13 07:41:59 crc kubenswrapper[5048]: I1213 07:41:59.277990 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-9l4zr_405bce54-d1ec-43dc-99f7-9181bb7a0a37/extract-utilities/0.log"
Dec 13 07:41:59 crc kubenswrapper[5048]: I1213 07:41:59.279082 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-9l4zr_405bce54-d1ec-43dc-99f7-9181bb7a0a37/extract-content/0.log"
Dec 13 07:41:59 crc kubenswrapper[5048]: I1213 07:41:59.484493 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-9l4zr_405bce54-d1ec-43dc-99f7-9181bb7a0a37/registry-server/0.log"
Dec 13 07:41:59 crc kubenswrapper[5048]: I1213 07:41:59.542888 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-zm7qx_60f5ae10-2f86-46f8-b613-f017b8753690/marketplace-operator/2.log"
Dec 13 07:41:59 crc kubenswrapper[5048]: I1213 07:41:59.552949 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-zm7qx_60f5ae10-2f86-46f8-b613-f017b8753690/marketplace-operator/3.log"
Dec 13 07:41:59 crc kubenswrapper[5048]: I1213 07:41:59.684890 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bgnnj_0cdd6ca2-d077-4ea1-8dda-6fadccca087d/extract-utilities/0.log"
Dec 13 07:41:59 crc kubenswrapper[5048]: I1213 07:41:59.849952 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bgnnj_0cdd6ca2-d077-4ea1-8dda-6fadccca087d/extract-content/0.log"
Dec 13 07:41:59 crc kubenswrapper[5048]: I1213 07:41:59.852937 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bgnnj_0cdd6ca2-d077-4ea1-8dda-6fadccca087d/extract-utilities/0.log"
Dec 13 07:41:59 crc kubenswrapper[5048]: I1213 07:41:59.886439 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bgnnj_0cdd6ca2-d077-4ea1-8dda-6fadccca087d/extract-content/0.log"
Dec 13 07:42:00 crc kubenswrapper[5048]: I1213 07:42:00.045915 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bgnnj_0cdd6ca2-d077-4ea1-8dda-6fadccca087d/extract-content/0.log"
Dec 13 07:42:00 crc kubenswrapper[5048]: I1213 07:42:00.056358 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bgnnj_0cdd6ca2-d077-4ea1-8dda-6fadccca087d/extract-utilities/0.log"
Dec 13 07:42:00 crc kubenswrapper[5048]: I1213 07:42:00.167558 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bgnnj_0cdd6ca2-d077-4ea1-8dda-6fadccca087d/registry-server/0.log"
Dec 13 07:42:00 crc kubenswrapper[5048]: I1213 07:42:00.308262 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-dqz4m_152d7097-80ce-42d3-b0a8-45e04a295b3d/extract-utilities/0.log"
Dec 13 07:42:00 crc kubenswrapper[5048]: I1213 07:42:00.493705 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-dqz4m_152d7097-80ce-42d3-b0a8-45e04a295b3d/extract-content/0.log"
Dec 13 07:42:00 crc kubenswrapper[5048]: I1213 07:42:00.500062 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-dqz4m_152d7097-80ce-42d3-b0a8-45e04a295b3d/extract-utilities/0.log"
Dec 13 07:42:00 crc kubenswrapper[5048]: I1213 07:42:00.534075 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-dqz4m_152d7097-80ce-42d3-b0a8-45e04a295b3d/extract-content/0.log"
Dec 13 07:42:00 crc kubenswrapper[5048]: I1213 07:42:00.706256 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-dqz4m_152d7097-80ce-42d3-b0a8-45e04a295b3d/extract-utilities/0.log"
Dec 13 07:42:00 crc kubenswrapper[5048]: I1213 07:42:00.754843 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-dqz4m_152d7097-80ce-42d3-b0a8-45e04a295b3d/extract-content/0.log"
Dec 13 07:42:01 crc kubenswrapper[5048]: I1213 07:42:01.233386 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-dqz4m_152d7097-80ce-42d3-b0a8-45e04a295b3d/registry-server/0.log"
Dec 13 07:42:16 crc kubenswrapper[5048]: I1213 07:42:16.215837 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 13 07:42:16 crc kubenswrapper[5048]: I1213 07:42:16.216472 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 13 07:42:26 crc kubenswrapper[5048]: I1213 07:42:26.558408 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wn5rk"]
Dec 13 07:42:26 crc kubenswrapper[5048]: E1213 07:42:26.559989 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff826bac-310b-4e92-a5c9-7c0fd6a2fbb6" containerName="container-00"
Dec 13 07:42:26 crc kubenswrapper[5048]: I1213 07:42:26.560065 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff826bac-310b-4e92-a5c9-7c0fd6a2fbb6" containerName="container-00"
Dec 13 07:42:26 crc kubenswrapper[5048]: I1213 07:42:26.560312 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff826bac-310b-4e92-a5c9-7c0fd6a2fbb6" containerName="container-00"
Dec 13 07:42:26 crc kubenswrapper[5048]: I1213 07:42:26.561646 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wn5rk"
Dec 13 07:42:26 crc kubenswrapper[5048]: I1213 07:42:26.603081 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wn5rk"]
Dec 13 07:42:26 crc kubenswrapper[5048]: I1213 07:42:26.669466 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nn4cv\" (UniqueName: \"kubernetes.io/projected/5acee64b-373c-4093-bae7-c2ccad1b7d8e-kube-api-access-nn4cv\") pod \"redhat-operators-wn5rk\" (UID: \"5acee64b-373c-4093-bae7-c2ccad1b7d8e\") " pod="openshift-marketplace/redhat-operators-wn5rk"
Dec 13 07:42:26 crc kubenswrapper[5048]: I1213 07:42:26.669538 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5acee64b-373c-4093-bae7-c2ccad1b7d8e-catalog-content\") pod \"redhat-operators-wn5rk\" (UID: \"5acee64b-373c-4093-bae7-c2ccad1b7d8e\") " pod="openshift-marketplace/redhat-operators-wn5rk"
Dec 13 07:42:26 crc kubenswrapper[5048]: I1213 07:42:26.669616 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5acee64b-373c-4093-bae7-c2ccad1b7d8e-utilities\") pod \"redhat-operators-wn5rk\" (UID: \"5acee64b-373c-4093-bae7-c2ccad1b7d8e\") " pod="openshift-marketplace/redhat-operators-wn5rk"
Dec 13 07:42:26 crc kubenswrapper[5048]: I1213 07:42:26.771716 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5acee64b-373c-4093-bae7-c2ccad1b7d8e-utilities\") pod \"redhat-operators-wn5rk\" (UID: \"5acee64b-373c-4093-bae7-c2ccad1b7d8e\") " pod="openshift-marketplace/redhat-operators-wn5rk"
Dec 13 07:42:26 crc kubenswrapper[5048]: I1213 07:42:26.771830 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nn4cv\" (UniqueName: \"kubernetes.io/projected/5acee64b-373c-4093-bae7-c2ccad1b7d8e-kube-api-access-nn4cv\") pod \"redhat-operators-wn5rk\" (UID: \"5acee64b-373c-4093-bae7-c2ccad1b7d8e\") " pod="openshift-marketplace/redhat-operators-wn5rk"
Dec 13 07:42:26 crc kubenswrapper[5048]: I1213 07:42:26.771871 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5acee64b-373c-4093-bae7-c2ccad1b7d8e-catalog-content\") pod \"redhat-operators-wn5rk\" (UID: \"5acee64b-373c-4093-bae7-c2ccad1b7d8e\") " pod="openshift-marketplace/redhat-operators-wn5rk"
Dec 13 07:42:26 crc kubenswrapper[5048]: I1213 07:42:26.772220 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5acee64b-373c-4093-bae7-c2ccad1b7d8e-utilities\") pod \"redhat-operators-wn5rk\" (UID: \"5acee64b-373c-4093-bae7-c2ccad1b7d8e\") " pod="openshift-marketplace/redhat-operators-wn5rk"
Dec 13 07:42:26 crc kubenswrapper[5048]: I1213 07:42:26.772253 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5acee64b-373c-4093-bae7-c2ccad1b7d8e-catalog-content\") pod \"redhat-operators-wn5rk\" (UID: \"5acee64b-373c-4093-bae7-c2ccad1b7d8e\") " pod="openshift-marketplace/redhat-operators-wn5rk"
Dec 13 07:42:26 crc kubenswrapper[5048]: I1213 07:42:26.806414 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nn4cv\" (UniqueName: \"kubernetes.io/projected/5acee64b-373c-4093-bae7-c2ccad1b7d8e-kube-api-access-nn4cv\") pod \"redhat-operators-wn5rk\" (UID: \"5acee64b-373c-4093-bae7-c2ccad1b7d8e\") " pod="openshift-marketplace/redhat-operators-wn5rk"
Dec 13 07:42:26 crc kubenswrapper[5048]: I1213 07:42:26.900700 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wn5rk"
Dec 13 07:42:27 crc kubenswrapper[5048]: I1213 07:42:27.457789 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wn5rk"]
Dec 13 07:42:28 crc kubenswrapper[5048]: I1213 07:42:28.215490 5048 generic.go:334] "Generic (PLEG): container finished" podID="5acee64b-373c-4093-bae7-c2ccad1b7d8e" containerID="ba0d3885c251e6e8ce5fe7cdfb96c72b558913bc78d2e3414d8c076e136498a4" exitCode=0
Dec 13 07:42:28 crc kubenswrapper[5048]: I1213 07:42:28.215687 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wn5rk" event={"ID":"5acee64b-373c-4093-bae7-c2ccad1b7d8e","Type":"ContainerDied","Data":"ba0d3885c251e6e8ce5fe7cdfb96c72b558913bc78d2e3414d8c076e136498a4"}
Dec 13 07:42:28 crc kubenswrapper[5048]: I1213 07:42:28.215860 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wn5rk" event={"ID":"5acee64b-373c-4093-bae7-c2ccad1b7d8e","Type":"ContainerStarted","Data":"7d6c331e48544fa3d636deba2c8cb03f247ad7497de9b2905ffd26bcd85a9836"}
Dec 13 07:42:30 crc kubenswrapper[5048]: I1213 07:42:30.237677 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wn5rk" event={"ID":"5acee64b-373c-4093-bae7-c2ccad1b7d8e","Type":"ContainerStarted","Data":"65ec9e1eebaa97ef7432453723162f997883ce09a2b4a8b2a2f95053791af14a"}
Dec 13 07:42:34 crc kubenswrapper[5048]: I1213 07:42:34.271890 5048 generic.go:334] "Generic (PLEG): container finished" podID="5acee64b-373c-4093-bae7-c2ccad1b7d8e" containerID="65ec9e1eebaa97ef7432453723162f997883ce09a2b4a8b2a2f95053791af14a" exitCode=0
Dec 13 07:42:34 crc kubenswrapper[5048]: I1213 07:42:34.271965 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wn5rk" event={"ID":"5acee64b-373c-4093-bae7-c2ccad1b7d8e","Type":"ContainerDied","Data":"65ec9e1eebaa97ef7432453723162f997883ce09a2b4a8b2a2f95053791af14a"}
Dec 13 07:42:36 crc kubenswrapper[5048]: I1213 07:42:36.311537 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wn5rk" event={"ID":"5acee64b-373c-4093-bae7-c2ccad1b7d8e","Type":"ContainerStarted","Data":"f8218754a9c06e83867dc96a0429daf2f282484064939dd8f8c9c31960812148"}
Dec 13 07:42:36 crc kubenswrapper[5048]: I1213 07:42:36.339589 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wn5rk" podStartSLOduration=3.149477573 podStartE2EDuration="10.339566731s" podCreationTimestamp="2025-12-13 07:42:26 +0000 UTC" firstStartedPulling="2025-12-13 07:42:28.217039601 +0000 UTC m=+4382.083634182" lastFinishedPulling="2025-12-13 07:42:35.407128739 +0000 UTC m=+4389.273723340" observedRunningTime="2025-12-13 07:42:36.330807292 +0000 UTC m=+4390.197401893" watchObservedRunningTime="2025-12-13 07:42:36.339566731 +0000 UTC m=+4390.206161322"
Dec 13 07:42:36 crc kubenswrapper[5048]: I1213 07:42:36.901409 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wn5rk"
Dec 13 07:42:36 crc kubenswrapper[5048]: I1213 07:42:36.901773 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wn5rk"
Dec 13 07:42:37 crc kubenswrapper[5048]: I1213 07:42:37.949527 5048 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wn5rk" podUID="5acee64b-373c-4093-bae7-c2ccad1b7d8e" containerName="registry-server" probeResult="failure" output=<
Dec 13 07:42:37 crc kubenswrapper[5048]: timeout: failed to connect service ":50051" within 1s
Dec 13 07:42:37 crc kubenswrapper[5048]: >
Dec 13 07:42:46 crc kubenswrapper[5048]: I1213 07:42:46.216498 5048 patch_prober.go:28] interesting pod/machine-config-daemon-j7hns container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 13 07:42:46 crc kubenswrapper[5048]: I1213 07:42:46.217092 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 13 07:42:46 crc kubenswrapper[5048]: I1213 07:42:46.217143 5048 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-j7hns"
Dec 13 07:42:46 crc kubenswrapper[5048]: I1213 07:42:46.217859 5048 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a7f376f7406108d393edb2cde004c6bfe176535074260353313082165961a8b5"} pod="openshift-machine-config-operator/machine-config-daemon-j7hns" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Dec 13 07:42:46 crc kubenswrapper[5048]: I1213 07:42:46.217920 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerName="machine-config-daemon" containerID="cri-o://a7f376f7406108d393edb2cde004c6bfe176535074260353313082165961a8b5" gracePeriod=600
Dec 13 07:42:46 crc kubenswrapper[5048]: I1213 07:42:46.402788 5048 generic.go:334] "Generic (PLEG): container finished" podID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" containerID="a7f376f7406108d393edb2cde004c6bfe176535074260353313082165961a8b5" exitCode=0
Dec 13 07:42:46 crc kubenswrapper[5048]: I1213 07:42:46.403880 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" event={"ID":"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b","Type":"ContainerDied","Data":"a7f376f7406108d393edb2cde004c6bfe176535074260353313082165961a8b5"}
Dec 13 07:42:46 crc kubenswrapper[5048]: I1213 07:42:46.403932 5048 scope.go:117] "RemoveContainer" containerID="39970abd8e25b0365871d71368dd87c07faf6d5fd36ea59cdd3743f467d385a2"
Dec 13 07:42:46 crc kubenswrapper[5048]: E1213 07:42:46.404941 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b"
Dec 13 07:42:46 crc kubenswrapper[5048]: I1213 07:42:46.948690 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wn5rk"
Dec 13 07:42:47 crc kubenswrapper[5048]: I1213 07:42:47.013136 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wn5rk"
Dec 13 07:42:47 crc kubenswrapper[5048]: I1213 07:42:47.183657 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wn5rk"]
Dec 13 07:42:47 crc kubenswrapper[5048]: I1213 07:42:47.415286 5048 scope.go:117] "RemoveContainer" containerID="a7f376f7406108d393edb2cde004c6bfe176535074260353313082165961a8b5"
Dec 13 07:42:47 crc kubenswrapper[5048]: E1213 07:42:47.415781 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b"
Dec 13 07:42:48 crc kubenswrapper[5048]: I1213 07:42:48.431157 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wn5rk" podUID="5acee64b-373c-4093-bae7-c2ccad1b7d8e" containerName="registry-server" containerID="cri-o://f8218754a9c06e83867dc96a0429daf2f282484064939dd8f8c9c31960812148" gracePeriod=2
Dec 13 07:42:48 crc kubenswrapper[5048]: I1213 07:42:48.931976 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wn5rk"
Dec 13 07:42:49 crc kubenswrapper[5048]: I1213 07:42:49.015420 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5acee64b-373c-4093-bae7-c2ccad1b7d8e-catalog-content\") pod \"5acee64b-373c-4093-bae7-c2ccad1b7d8e\" (UID: \"5acee64b-373c-4093-bae7-c2ccad1b7d8e\") "
Dec 13 07:42:49 crc kubenswrapper[5048]: I1213 07:42:49.015606 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5acee64b-373c-4093-bae7-c2ccad1b7d8e-utilities\") pod \"5acee64b-373c-4093-bae7-c2ccad1b7d8e\" (UID: \"5acee64b-373c-4093-bae7-c2ccad1b7d8e\") "
Dec 13 07:42:49 crc kubenswrapper[5048]: I1213 07:42:49.015694 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nn4cv\" (UniqueName: \"kubernetes.io/projected/5acee64b-373c-4093-bae7-c2ccad1b7d8e-kube-api-access-nn4cv\") pod \"5acee64b-373c-4093-bae7-c2ccad1b7d8e\" (UID: \"5acee64b-373c-4093-bae7-c2ccad1b7d8e\") "
Dec 13 07:42:49 crc kubenswrapper[5048]: I1213 07:42:49.016517 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5acee64b-373c-4093-bae7-c2ccad1b7d8e-utilities" (OuterVolumeSpecName: "utilities") pod "5acee64b-373c-4093-bae7-c2ccad1b7d8e" (UID: "5acee64b-373c-4093-bae7-c2ccad1b7d8e"). InnerVolumeSpecName "utilities".
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 07:42:49 crc kubenswrapper[5048]: I1213 07:42:49.020798 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5acee64b-373c-4093-bae7-c2ccad1b7d8e-kube-api-access-nn4cv" (OuterVolumeSpecName: "kube-api-access-nn4cv") pod "5acee64b-373c-4093-bae7-c2ccad1b7d8e" (UID: "5acee64b-373c-4093-bae7-c2ccad1b7d8e"). InnerVolumeSpecName "kube-api-access-nn4cv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 07:42:49 crc kubenswrapper[5048]: I1213 07:42:49.117919 5048 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5acee64b-373c-4093-bae7-c2ccad1b7d8e-utilities\") on node \"crc\" DevicePath \"\"" Dec 13 07:42:49 crc kubenswrapper[5048]: I1213 07:42:49.117961 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nn4cv\" (UniqueName: \"kubernetes.io/projected/5acee64b-373c-4093-bae7-c2ccad1b7d8e-kube-api-access-nn4cv\") on node \"crc\" DevicePath \"\"" Dec 13 07:42:49 crc kubenswrapper[5048]: I1213 07:42:49.129817 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5acee64b-373c-4093-bae7-c2ccad1b7d8e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5acee64b-373c-4093-bae7-c2ccad1b7d8e" (UID: "5acee64b-373c-4093-bae7-c2ccad1b7d8e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 07:42:49 crc kubenswrapper[5048]: I1213 07:42:49.219263 5048 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5acee64b-373c-4093-bae7-c2ccad1b7d8e-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 13 07:42:49 crc kubenswrapper[5048]: I1213 07:42:49.441038 5048 generic.go:334] "Generic (PLEG): container finished" podID="5acee64b-373c-4093-bae7-c2ccad1b7d8e" containerID="f8218754a9c06e83867dc96a0429daf2f282484064939dd8f8c9c31960812148" exitCode=0 Dec 13 07:42:49 crc kubenswrapper[5048]: I1213 07:42:49.441081 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wn5rk" Dec 13 07:42:49 crc kubenswrapper[5048]: I1213 07:42:49.441097 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wn5rk" event={"ID":"5acee64b-373c-4093-bae7-c2ccad1b7d8e","Type":"ContainerDied","Data":"f8218754a9c06e83867dc96a0429daf2f282484064939dd8f8c9c31960812148"} Dec 13 07:42:49 crc kubenswrapper[5048]: I1213 07:42:49.442478 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wn5rk" event={"ID":"5acee64b-373c-4093-bae7-c2ccad1b7d8e","Type":"ContainerDied","Data":"7d6c331e48544fa3d636deba2c8cb03f247ad7497de9b2905ffd26bcd85a9836"} Dec 13 07:42:49 crc kubenswrapper[5048]: I1213 07:42:49.442503 5048 scope.go:117] "RemoveContainer" containerID="f8218754a9c06e83867dc96a0429daf2f282484064939dd8f8c9c31960812148" Dec 13 07:42:49 crc kubenswrapper[5048]: I1213 07:42:49.472515 5048 scope.go:117] "RemoveContainer" containerID="65ec9e1eebaa97ef7432453723162f997883ce09a2b4a8b2a2f95053791af14a" Dec 13 07:42:49 crc kubenswrapper[5048]: I1213 07:42:49.498887 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wn5rk"] Dec 13 07:42:49 crc kubenswrapper[5048]: I1213 07:42:49.508174 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wn5rk"] Dec 13 07:42:49 crc kubenswrapper[5048]: I1213 07:42:49.510755 5048 scope.go:117] "RemoveContainer" containerID="ba0d3885c251e6e8ce5fe7cdfb96c72b558913bc78d2e3414d8c076e136498a4" Dec 13 07:42:49 crc kubenswrapper[5048]: I1213 07:42:49.554050 5048 scope.go:117] "RemoveContainer" containerID="f8218754a9c06e83867dc96a0429daf2f282484064939dd8f8c9c31960812148" Dec 13 07:42:49 crc kubenswrapper[5048]: E1213 07:42:49.554712 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f8218754a9c06e83867dc96a0429daf2f282484064939dd8f8c9c31960812148\": container with ID starting with f8218754a9c06e83867dc96a0429daf2f282484064939dd8f8c9c31960812148 not found: ID does not exist" containerID="f8218754a9c06e83867dc96a0429daf2f282484064939dd8f8c9c31960812148" Dec 13 07:42:49 crc kubenswrapper[5048]: I1213 07:42:49.554919 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f8218754a9c06e83867dc96a0429daf2f282484064939dd8f8c9c31960812148"} err="failed to get container status \"f8218754a9c06e83867dc96a0429daf2f282484064939dd8f8c9c31960812148\": rpc error: code = NotFound desc = could not find container \"f8218754a9c06e83867dc96a0429daf2f282484064939dd8f8c9c31960812148\": container with ID starting with f8218754a9c06e83867dc96a0429daf2f282484064939dd8f8c9c31960812148 not found: ID does not exist" Dec 13 07:42:49 crc kubenswrapper[5048]: I1213 07:42:49.555096 5048 scope.go:117] "RemoveContainer" containerID="65ec9e1eebaa97ef7432453723162f997883ce09a2b4a8b2a2f95053791af14a" Dec 13 07:42:49 crc kubenswrapper[5048]: E1213 07:42:49.555643 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"65ec9e1eebaa97ef7432453723162f997883ce09a2b4a8b2a2f95053791af14a\": container with ID starting with 65ec9e1eebaa97ef7432453723162f997883ce09a2b4a8b2a2f95053791af14a not found: ID does not exist" containerID="65ec9e1eebaa97ef7432453723162f997883ce09a2b4a8b2a2f95053791af14a" Dec 13 07:42:49 crc kubenswrapper[5048]: I1213 07:42:49.555713 5048 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"65ec9e1eebaa97ef7432453723162f997883ce09a2b4a8b2a2f95053791af14a"} err="failed to get container status \"65ec9e1eebaa97ef7432453723162f997883ce09a2b4a8b2a2f95053791af14a\": rpc error: code = NotFound desc = could not find container \"65ec9e1eebaa97ef7432453723162f997883ce09a2b4a8b2a2f95053791af14a\": container with ID starting with 65ec9e1eebaa97ef7432453723162f997883ce09a2b4a8b2a2f95053791af14a not found: ID does not exist" Dec 13 07:42:49 crc kubenswrapper[5048]: I1213 07:42:49.555749 5048 scope.go:117] "RemoveContainer" containerID="ba0d3885c251e6e8ce5fe7cdfb96c72b558913bc78d2e3414d8c076e136498a4" Dec 13 07:42:49 crc kubenswrapper[5048]: E1213 07:42:49.556107 5048 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba0d3885c251e6e8ce5fe7cdfb96c72b558913bc78d2e3414d8c076e136498a4\": container with ID starting with ba0d3885c251e6e8ce5fe7cdfb96c72b558913bc78d2e3414d8c076e136498a4 not found: ID does not exist" containerID="ba0d3885c251e6e8ce5fe7cdfb96c72b558913bc78d2e3414d8c076e136498a4" Dec 13 07:42:49 crc kubenswrapper[5048]: I1213 07:42:49.556176 5048 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba0d3885c251e6e8ce5fe7cdfb96c72b558913bc78d2e3414d8c076e136498a4"} err="failed to get container status \"ba0d3885c251e6e8ce5fe7cdfb96c72b558913bc78d2e3414d8c076e136498a4\": rpc error: code = NotFound desc = could not find container \"ba0d3885c251e6e8ce5fe7cdfb96c72b558913bc78d2e3414d8c076e136498a4\": container with ID starting with ba0d3885c251e6e8ce5fe7cdfb96c72b558913bc78d2e3414d8c076e136498a4 not found: ID does not exist" Dec 13 07:42:50 crc kubenswrapper[5048]: I1213 07:42:50.578830 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5acee64b-373c-4093-bae7-c2ccad1b7d8e" path="/var/lib/kubelet/pods/5acee64b-373c-4093-bae7-c2ccad1b7d8e/volumes" Dec 13 07:42:58 crc kubenswrapper[5048]: I1213 07:42:58.568287 5048 scope.go:117] "RemoveContainer" containerID="a7f376f7406108d393edb2cde004c6bfe176535074260353313082165961a8b5" Dec 13 07:42:58 crc kubenswrapper[5048]: E1213 07:42:58.569617 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:43:11 crc kubenswrapper[5048]: I1213 07:43:11.567099 5048 scope.go:117] "RemoveContainer" containerID="a7f376f7406108d393edb2cde004c6bfe176535074260353313082165961a8b5" Dec 13 07:43:11 crc kubenswrapper[5048]: E1213 07:43:11.568128 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:43:25 crc kubenswrapper[5048]: I1213 07:43:25.566821 5048 scope.go:117] "RemoveContainer" containerID="a7f376f7406108d393edb2cde004c6bfe176535074260353313082165961a8b5" Dec 13 07:43:25 crc 
kubenswrapper[5048]: E1213 07:43:25.567505 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:43:40 crc kubenswrapper[5048]: I1213 07:43:40.567279 5048 scope.go:117] "RemoveContainer" containerID="a7f376f7406108d393edb2cde004c6bfe176535074260353313082165961a8b5" Dec 13 07:43:40 crc kubenswrapper[5048]: E1213 07:43:40.568427 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:43:47 crc kubenswrapper[5048]: I1213 07:43:47.434731 5048 generic.go:334] "Generic (PLEG): container finished" podID="3a336011-c6da-4a8e-a04b-d1981a139f23" containerID="75ba6958a187d82f97c76197f34c82c62372684a1345291f0a5b87f869f7d96f" exitCode=0 Dec 13 07:43:47 crc kubenswrapper[5048]: I1213 07:43:47.434845 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-8djz2/must-gather-llm8s" event={"ID":"3a336011-c6da-4a8e-a04b-d1981a139f23","Type":"ContainerDied","Data":"75ba6958a187d82f97c76197f34c82c62372684a1345291f0a5b87f869f7d96f"} Dec 13 07:43:47 crc kubenswrapper[5048]: I1213 07:43:47.435951 5048 scope.go:117] "RemoveContainer" containerID="75ba6958a187d82f97c76197f34c82c62372684a1345291f0a5b87f869f7d96f" Dec 13 07:43:48 crc kubenswrapper[5048]: I1213 07:43:48.427875 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-8djz2_must-gather-llm8s_3a336011-c6da-4a8e-a04b-d1981a139f23/gather/0.log" Dec 13 07:43:52 crc kubenswrapper[5048]: I1213 07:43:52.566763 5048 scope.go:117] "RemoveContainer" containerID="a7f376f7406108d393edb2cde004c6bfe176535074260353313082165961a8b5" Dec 13 07:43:52 crc kubenswrapper[5048]: E1213 07:43:52.567517 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:43:59 crc kubenswrapper[5048]: I1213 07:43:59.093728 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-8djz2/must-gather-llm8s"] Dec 13 07:43:59 crc kubenswrapper[5048]: I1213 07:43:59.094491 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-8djz2/must-gather-llm8s" podUID="3a336011-c6da-4a8e-a04b-d1981a139f23" containerName="copy" containerID="cri-o://632808db1d8bfe0077a442e6b55424f63358fe38306cf75539e3982ee918fdef" gracePeriod=2 Dec 13 07:43:59 crc kubenswrapper[5048]: I1213 07:43:59.114003 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-8djz2/must-gather-llm8s"] Dec 13 07:43:59 crc kubenswrapper[5048]: I1213 
07:43:59.547052 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-8djz2_must-gather-llm8s_3a336011-c6da-4a8e-a04b-d1981a139f23/copy/0.log" Dec 13 07:43:59 crc kubenswrapper[5048]: I1213 07:43:59.547661 5048 generic.go:334] "Generic (PLEG): container finished" podID="3a336011-c6da-4a8e-a04b-d1981a139f23" containerID="632808db1d8bfe0077a442e6b55424f63358fe38306cf75539e3982ee918fdef" exitCode=143 Dec 13 07:44:00 crc kubenswrapper[5048]: I1213 07:44:00.228889 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-8djz2_must-gather-llm8s_3a336011-c6da-4a8e-a04b-d1981a139f23/copy/0.log" Dec 13 07:44:00 crc kubenswrapper[5048]: I1213 07:44:00.229529 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-8djz2/must-gather-llm8s" Dec 13 07:44:00 crc kubenswrapper[5048]: I1213 07:44:00.272429 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ttjdw\" (UniqueName: \"kubernetes.io/projected/3a336011-c6da-4a8e-a04b-d1981a139f23-kube-api-access-ttjdw\") pod \"3a336011-c6da-4a8e-a04b-d1981a139f23\" (UID: \"3a336011-c6da-4a8e-a04b-d1981a139f23\") " Dec 13 07:44:00 crc kubenswrapper[5048]: I1213 07:44:00.272623 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/3a336011-c6da-4a8e-a04b-d1981a139f23-must-gather-output\") pod \"3a336011-c6da-4a8e-a04b-d1981a139f23\" (UID: \"3a336011-c6da-4a8e-a04b-d1981a139f23\") " Dec 13 07:44:00 crc kubenswrapper[5048]: I1213 07:44:00.280216 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a336011-c6da-4a8e-a04b-d1981a139f23-kube-api-access-ttjdw" (OuterVolumeSpecName: "kube-api-access-ttjdw") pod "3a336011-c6da-4a8e-a04b-d1981a139f23" (UID: "3a336011-c6da-4a8e-a04b-d1981a139f23"). InnerVolumeSpecName "kube-api-access-ttjdw". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 07:44:00 crc kubenswrapper[5048]: I1213 07:44:00.376528 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ttjdw\" (UniqueName: \"kubernetes.io/projected/3a336011-c6da-4a8e-a04b-d1981a139f23-kube-api-access-ttjdw\") on node \"crc\" DevicePath \"\"" Dec 13 07:44:00 crc kubenswrapper[5048]: I1213 07:44:00.458046 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a336011-c6da-4a8e-a04b-d1981a139f23-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "3a336011-c6da-4a8e-a04b-d1981a139f23" (UID: "3a336011-c6da-4a8e-a04b-d1981a139f23"). InnerVolumeSpecName "must-gather-output". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 07:44:00 crc kubenswrapper[5048]: I1213 07:44:00.478380 5048 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/3a336011-c6da-4a8e-a04b-d1981a139f23-must-gather-output\") on node \"crc\" DevicePath \"\"" Dec 13 07:44:00 crc kubenswrapper[5048]: I1213 07:44:00.558544 5048 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-8djz2_must-gather-llm8s_3a336011-c6da-4a8e-a04b-d1981a139f23/copy/0.log" Dec 13 07:44:00 crc kubenswrapper[5048]: I1213 07:44:00.558866 5048 scope.go:117] "RemoveContainer" containerID="632808db1d8bfe0077a442e6b55424f63358fe38306cf75539e3982ee918fdef" Dec 13 07:44:00 crc kubenswrapper[5048]: I1213 07:44:00.558936 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-8djz2/must-gather-llm8s" Dec 13 07:44:00 crc kubenswrapper[5048]: I1213 07:44:00.580875 5048 scope.go:117] "RemoveContainer" containerID="75ba6958a187d82f97c76197f34c82c62372684a1345291f0a5b87f869f7d96f" Dec 13 07:44:00 crc kubenswrapper[5048]: I1213 07:44:00.611453 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a336011-c6da-4a8e-a04b-d1981a139f23" path="/var/lib/kubelet/pods/3a336011-c6da-4a8e-a04b-d1981a139f23/volumes" Dec 13 07:44:03 crc kubenswrapper[5048]: I1213 07:44:03.567007 5048 scope.go:117] "RemoveContainer" containerID="a7f376f7406108d393edb2cde004c6bfe176535074260353313082165961a8b5" Dec 13 07:44:03 crc kubenswrapper[5048]: E1213 07:44:03.567797 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:44:14 crc kubenswrapper[5048]: I1213 07:44:14.566816 5048 scope.go:117] "RemoveContainer" containerID="a7f376f7406108d393edb2cde004c6bfe176535074260353313082165961a8b5" Dec 13 07:44:14 crc kubenswrapper[5048]: E1213 07:44:14.567670 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:44:23 crc kubenswrapper[5048]: I1213 07:44:23.663950 5048 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/neutron-6465fd554f-k9lkr" podUID="4a6223fc-c3a5-462d-b61d-ebd353bbe7ca" containerName="neutron-api" probeResult="failure" output="HTTP probe failed with statuscode: 502" Dec 13 07:44:25 crc kubenswrapper[5048]: I1213 07:44:25.567741 5048 scope.go:117] "RemoveContainer" containerID="a7f376f7406108d393edb2cde004c6bfe176535074260353313082165961a8b5" Dec 13 07:44:25 crc kubenswrapper[5048]: E1213 07:44:25.568669 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:44:36 crc kubenswrapper[5048]: I1213 07:44:36.574845 5048 scope.go:117] "RemoveContainer" containerID="a7f376f7406108d393edb2cde004c6bfe176535074260353313082165961a8b5" Dec 13 07:44:36 crc kubenswrapper[5048]: E1213 07:44:36.575810 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:44:51 crc kubenswrapper[5048]: I1213 07:44:51.567896 5048 scope.go:117] "RemoveContainer" containerID="a7f376f7406108d393edb2cde004c6bfe176535074260353313082165961a8b5" Dec 13 07:44:51 crc kubenswrapper[5048]: E1213 07:44:51.569218 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:44:56 crc kubenswrapper[5048]: I1213 07:44:56.319565 5048 scope.go:117] "RemoveContainer" containerID="69f79b0584fcc03324fef0a07d9d0b22e76e9748462197d49829d87d571deb4c" Dec 13 07:45:00 crc kubenswrapper[5048]: I1213 07:45:00.175976 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29426865-nk9wf"] Dec 13 07:45:00 crc kubenswrapper[5048]: E1213 07:45:00.176849 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5acee64b-373c-4093-bae7-c2ccad1b7d8e" containerName="registry-server" Dec 13 07:45:00 crc kubenswrapper[5048]: I1213 07:45:00.176864 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="5acee64b-373c-4093-bae7-c2ccad1b7d8e" containerName="registry-server" Dec 13 07:45:00 crc kubenswrapper[5048]: E1213 07:45:00.176886 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a336011-c6da-4a8e-a04b-d1981a139f23" containerName="gather" Dec 13 07:45:00 crc kubenswrapper[5048]: I1213 07:45:00.176894 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a336011-c6da-4a8e-a04b-d1981a139f23" containerName="gather" Dec 13 07:45:00 crc kubenswrapper[5048]: E1213 07:45:00.176926 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a336011-c6da-4a8e-a04b-d1981a139f23" containerName="copy" Dec 13 07:45:00 crc kubenswrapper[5048]: I1213 07:45:00.176934 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a336011-c6da-4a8e-a04b-d1981a139f23" containerName="copy" Dec 13 07:45:00 crc kubenswrapper[5048]: E1213 07:45:00.176955 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5acee64b-373c-4093-bae7-c2ccad1b7d8e" containerName="extract-utilities" Dec 13 07:45:00 crc kubenswrapper[5048]: I1213 07:45:00.176963 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="5acee64b-373c-4093-bae7-c2ccad1b7d8e" containerName="extract-utilities" Dec 13 07:45:00 crc kubenswrapper[5048]: E1213 07:45:00.176990 5048 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5acee64b-373c-4093-bae7-c2ccad1b7d8e" containerName="extract-content" Dec 13 07:45:00 crc kubenswrapper[5048]: I1213 07:45:00.176998 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="5acee64b-373c-4093-bae7-c2ccad1b7d8e" containerName="extract-content" Dec 13 07:45:00 crc kubenswrapper[5048]: I1213 07:45:00.177232 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a336011-c6da-4a8e-a04b-d1981a139f23" containerName="gather" Dec 13 07:45:00 crc kubenswrapper[5048]: I1213 07:45:00.177252 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a336011-c6da-4a8e-a04b-d1981a139f23" containerName="copy" Dec 13 07:45:00 crc kubenswrapper[5048]: I1213 07:45:00.177269 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="5acee64b-373c-4093-bae7-c2ccad1b7d8e" containerName="registry-server" Dec 13 07:45:00 crc kubenswrapper[5048]: I1213 07:45:00.178581 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29426865-nk9wf" Dec 13 07:45:00 crc kubenswrapper[5048]: I1213 07:45:00.180311 5048 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 13 07:45:00 crc kubenswrapper[5048]: I1213 07:45:00.180335 5048 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 13 07:45:00 crc kubenswrapper[5048]: I1213 07:45:00.206115 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29426865-nk9wf"] Dec 13 07:45:00 crc kubenswrapper[5048]: I1213 07:45:00.242884 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lwgfp\" (UniqueName: \"kubernetes.io/projected/795af8bd-658c-4af1-9df6-ec87eb3539a7-kube-api-access-lwgfp\") pod \"collect-profiles-29426865-nk9wf\" (UID: \"795af8bd-658c-4af1-9df6-ec87eb3539a7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426865-nk9wf" Dec 13 07:45:00 crc kubenswrapper[5048]: I1213 07:45:00.242926 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/795af8bd-658c-4af1-9df6-ec87eb3539a7-config-volume\") pod \"collect-profiles-29426865-nk9wf\" (UID: \"795af8bd-658c-4af1-9df6-ec87eb3539a7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426865-nk9wf" Dec 13 07:45:00 crc kubenswrapper[5048]: I1213 07:45:00.242972 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/795af8bd-658c-4af1-9df6-ec87eb3539a7-secret-volume\") pod \"collect-profiles-29426865-nk9wf\" (UID: \"795af8bd-658c-4af1-9df6-ec87eb3539a7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426865-nk9wf" Dec 13 07:45:00 crc kubenswrapper[5048]: I1213 07:45:00.345151 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lwgfp\" (UniqueName: \"kubernetes.io/projected/795af8bd-658c-4af1-9df6-ec87eb3539a7-kube-api-access-lwgfp\") pod \"collect-profiles-29426865-nk9wf\" (UID: \"795af8bd-658c-4af1-9df6-ec87eb3539a7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426865-nk9wf" Dec 13 07:45:00 crc kubenswrapper[5048]: I1213 
07:45:00.345220 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/795af8bd-658c-4af1-9df6-ec87eb3539a7-config-volume\") pod \"collect-profiles-29426865-nk9wf\" (UID: \"795af8bd-658c-4af1-9df6-ec87eb3539a7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426865-nk9wf" Dec 13 07:45:00 crc kubenswrapper[5048]: I1213 07:45:00.345276 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/795af8bd-658c-4af1-9df6-ec87eb3539a7-secret-volume\") pod \"collect-profiles-29426865-nk9wf\" (UID: \"795af8bd-658c-4af1-9df6-ec87eb3539a7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426865-nk9wf" Dec 13 07:45:00 crc kubenswrapper[5048]: I1213 07:45:00.346191 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/795af8bd-658c-4af1-9df6-ec87eb3539a7-config-volume\") pod \"collect-profiles-29426865-nk9wf\" (UID: \"795af8bd-658c-4af1-9df6-ec87eb3539a7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426865-nk9wf" Dec 13 07:45:00 crc kubenswrapper[5048]: I1213 07:45:00.350857 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/795af8bd-658c-4af1-9df6-ec87eb3539a7-secret-volume\") pod \"collect-profiles-29426865-nk9wf\" (UID: \"795af8bd-658c-4af1-9df6-ec87eb3539a7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426865-nk9wf" Dec 13 07:45:00 crc kubenswrapper[5048]: I1213 07:45:00.361265 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lwgfp\" (UniqueName: \"kubernetes.io/projected/795af8bd-658c-4af1-9df6-ec87eb3539a7-kube-api-access-lwgfp\") pod \"collect-profiles-29426865-nk9wf\" (UID: \"795af8bd-658c-4af1-9df6-ec87eb3539a7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29426865-nk9wf" Dec 13 07:45:00 crc kubenswrapper[5048]: I1213 07:45:00.512670 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29426865-nk9wf" Dec 13 07:45:00 crc kubenswrapper[5048]: I1213 07:45:00.977730 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29426865-nk9wf"] Dec 13 07:45:01 crc kubenswrapper[5048]: I1213 07:45:01.146248 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29426865-nk9wf" event={"ID":"795af8bd-658c-4af1-9df6-ec87eb3539a7","Type":"ContainerStarted","Data":"393677fcc3062f575f9fad1d529ccfad0d219886cdcfd252c9d330154adc32d2"} Dec 13 07:45:01 crc kubenswrapper[5048]: I1213 07:45:01.146578 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29426865-nk9wf" event={"ID":"795af8bd-658c-4af1-9df6-ec87eb3539a7","Type":"ContainerStarted","Data":"9a105b02ad2128f9fd2e47fd4941808f5e373d2771978589fc3b95e9d328154d"} Dec 13 07:45:01 crc kubenswrapper[5048]: I1213 07:45:01.166258 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29426865-nk9wf" podStartSLOduration=1.166218882 podStartE2EDuration="1.166218882s" podCreationTimestamp="2025-12-13 07:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-13 07:45:01.158556323 +0000 UTC m=+4535.025150924" watchObservedRunningTime="2025-12-13 07:45:01.166218882 +0000 UTC m=+4535.032813463" Dec 13 07:45:02 crc kubenswrapper[5048]: I1213 07:45:02.162131 5048 generic.go:334] "Generic (PLEG): container finished" podID="795af8bd-658c-4af1-9df6-ec87eb3539a7" containerID="393677fcc3062f575f9fad1d529ccfad0d219886cdcfd252c9d330154adc32d2" exitCode=0 Dec 13 07:45:02 crc kubenswrapper[5048]: I1213 07:45:02.162476 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29426865-nk9wf" event={"ID":"795af8bd-658c-4af1-9df6-ec87eb3539a7","Type":"ContainerDied","Data":"393677fcc3062f575f9fad1d529ccfad0d219886cdcfd252c9d330154adc32d2"} Dec 13 07:45:03 crc kubenswrapper[5048]: I1213 07:45:03.587659 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29426865-nk9wf" Dec 13 07:45:03 crc kubenswrapper[5048]: I1213 07:45:03.711117 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/795af8bd-658c-4af1-9df6-ec87eb3539a7-secret-volume\") pod \"795af8bd-658c-4af1-9df6-ec87eb3539a7\" (UID: \"795af8bd-658c-4af1-9df6-ec87eb3539a7\") " Dec 13 07:45:03 crc kubenswrapper[5048]: I1213 07:45:03.711363 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/795af8bd-658c-4af1-9df6-ec87eb3539a7-config-volume\") pod \"795af8bd-658c-4af1-9df6-ec87eb3539a7\" (UID: \"795af8bd-658c-4af1-9df6-ec87eb3539a7\") " Dec 13 07:45:03 crc kubenswrapper[5048]: I1213 07:45:03.711526 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lwgfp\" (UniqueName: \"kubernetes.io/projected/795af8bd-658c-4af1-9df6-ec87eb3539a7-kube-api-access-lwgfp\") pod \"795af8bd-658c-4af1-9df6-ec87eb3539a7\" (UID: \"795af8bd-658c-4af1-9df6-ec87eb3539a7\") " Dec 13 07:45:03 crc kubenswrapper[5048]: I1213 07:45:03.712721 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/795af8bd-658c-4af1-9df6-ec87eb3539a7-config-volume" (OuterVolumeSpecName: "config-volume") pod "795af8bd-658c-4af1-9df6-ec87eb3539a7" (UID: "795af8bd-658c-4af1-9df6-ec87eb3539a7"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 13 07:45:03 crc kubenswrapper[5048]: I1213 07:45:03.718415 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/795af8bd-658c-4af1-9df6-ec87eb3539a7-kube-api-access-lwgfp" (OuterVolumeSpecName: "kube-api-access-lwgfp") pod "795af8bd-658c-4af1-9df6-ec87eb3539a7" (UID: "795af8bd-658c-4af1-9df6-ec87eb3539a7"). InnerVolumeSpecName "kube-api-access-lwgfp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 07:45:03 crc kubenswrapper[5048]: I1213 07:45:03.724634 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/795af8bd-658c-4af1-9df6-ec87eb3539a7-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "795af8bd-658c-4af1-9df6-ec87eb3539a7" (UID: "795af8bd-658c-4af1-9df6-ec87eb3539a7"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 13 07:45:03 crc kubenswrapper[5048]: I1213 07:45:03.813508 5048 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/795af8bd-658c-4af1-9df6-ec87eb3539a7-config-volume\") on node \"crc\" DevicePath \"\"" Dec 13 07:45:03 crc kubenswrapper[5048]: I1213 07:45:03.813968 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lwgfp\" (UniqueName: \"kubernetes.io/projected/795af8bd-658c-4af1-9df6-ec87eb3539a7-kube-api-access-lwgfp\") on node \"crc\" DevicePath \"\"" Dec 13 07:45:03 crc kubenswrapper[5048]: I1213 07:45:03.813985 5048 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/795af8bd-658c-4af1-9df6-ec87eb3539a7-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 13 07:45:04 crc kubenswrapper[5048]: I1213 07:45:04.179915 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29426865-nk9wf" event={"ID":"795af8bd-658c-4af1-9df6-ec87eb3539a7","Type":"ContainerDied","Data":"9a105b02ad2128f9fd2e47fd4941808f5e373d2771978589fc3b95e9d328154d"} Dec 13 07:45:04 crc kubenswrapper[5048]: I1213 07:45:04.179967 5048 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9a105b02ad2128f9fd2e47fd4941808f5e373d2771978589fc3b95e9d328154d" Dec 13 07:45:04 crc kubenswrapper[5048]: I1213 07:45:04.179993 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29426865-nk9wf" Dec 13 07:45:04 crc kubenswrapper[5048]: I1213 07:45:04.249173 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29426820-pghsg"] Dec 13 07:45:04 crc kubenswrapper[5048]: I1213 07:45:04.260750 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29426820-pghsg"] Dec 13 07:45:04 crc kubenswrapper[5048]: I1213 07:45:04.577989 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7f5a1a5d-316c-4a98-80ab-3ee688ab0672" path="/var/lib/kubelet/pods/7f5a1a5d-316c-4a98-80ab-3ee688ab0672/volumes" Dec 13 07:45:05 crc kubenswrapper[5048]: I1213 07:45:05.567112 5048 scope.go:117] "RemoveContainer" containerID="a7f376f7406108d393edb2cde004c6bfe176535074260353313082165961a8b5" Dec 13 07:45:05 crc kubenswrapper[5048]: E1213 07:45:05.568844 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:45:17 crc kubenswrapper[5048]: I1213 07:45:17.567028 5048 scope.go:117] "RemoveContainer" containerID="a7f376f7406108d393edb2cde004c6bfe176535074260353313082165961a8b5" Dec 13 07:45:17 crc kubenswrapper[5048]: E1213 07:45:17.567839 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:45:29 crc kubenswrapper[5048]: I1213 07:45:29.566749 5048 scope.go:117] "RemoveContainer" containerID="a7f376f7406108d393edb2cde004c6bfe176535074260353313082165961a8b5" Dec 13 07:45:29 crc kubenswrapper[5048]: E1213 07:45:29.567626 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:45:41 crc kubenswrapper[5048]: I1213 07:45:41.567377 5048 scope.go:117] "RemoveContainer" containerID="a7f376f7406108d393edb2cde004c6bfe176535074260353313082165961a8b5" Dec 13 07:45:41 crc kubenswrapper[5048]: E1213 07:45:41.568163 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:45:56 crc kubenswrapper[5048]: I1213 07:45:56.419429 5048 scope.go:117] "RemoveContainer" containerID="ef3d98b8906b0a293e0abe9c9bb145ae32ed4b80a4516b5d5010ba3e7427f4db" Dec 13 07:45:56 crc kubenswrapper[5048]: I1213 07:45:56.572552 5048 scope.go:117] "RemoveContainer" containerID="a7f376f7406108d393edb2cde004c6bfe176535074260353313082165961a8b5" Dec 13 07:45:56 crc kubenswrapper[5048]: E1213 07:45:56.573081 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:46:09 crc kubenswrapper[5048]: I1213 07:46:09.568051 5048 scope.go:117] "RemoveContainer" containerID="a7f376f7406108d393edb2cde004c6bfe176535074260353313082165961a8b5" Dec 13 07:46:09 crc kubenswrapper[5048]: E1213 07:46:09.572276 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:46:21 crc kubenswrapper[5048]: I1213 07:46:21.567080 5048 scope.go:117] "RemoveContainer" containerID="a7f376f7406108d393edb2cde004c6bfe176535074260353313082165961a8b5" Dec 13 07:46:21 crc kubenswrapper[5048]: E1213 07:46:21.568029 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:46:36 crc kubenswrapper[5048]: I1213 07:46:36.573409 5048 scope.go:117] "RemoveContainer" containerID="a7f376f7406108d393edb2cde004c6bfe176535074260353313082165961a8b5" Dec 13 07:46:36 crc kubenswrapper[5048]: E1213 07:46:36.574192 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:46:50 crc kubenswrapper[5048]: I1213 07:46:50.567703 5048 scope.go:117] "RemoveContainer" containerID="a7f376f7406108d393edb2cde004c6bfe176535074260353313082165961a8b5" Dec 13 07:46:50 crc kubenswrapper[5048]: E1213 07:46:50.568476 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:47:01 crc kubenswrapper[5048]: I1213 07:47:01.579969 5048 scope.go:117] "RemoveContainer" containerID="a7f376f7406108d393edb2cde004c6bfe176535074260353313082165961a8b5" Dec 13 07:47:01 crc kubenswrapper[5048]: E1213 07:47:01.581343 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:47:08 crc kubenswrapper[5048]: I1213 07:47:08.138820 5048 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-dwtw6"] Dec 13 07:47:08 crc kubenswrapper[5048]: E1213 07:47:08.140081 5048 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="795af8bd-658c-4af1-9df6-ec87eb3539a7" containerName="collect-profiles" Dec 13 07:47:08 crc kubenswrapper[5048]: I1213 07:47:08.140103 5048 state_mem.go:107] "Deleted CPUSet assignment" podUID="795af8bd-658c-4af1-9df6-ec87eb3539a7" containerName="collect-profiles" Dec 13 07:47:08 crc kubenswrapper[5048]: I1213 07:47:08.140528 5048 memory_manager.go:354] "RemoveStaleState removing state" podUID="795af8bd-658c-4af1-9df6-ec87eb3539a7" containerName="collect-profiles" Dec 13 07:47:08 crc kubenswrapper[5048]: I1213 07:47:08.142859 5048 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-dwtw6" Dec 13 07:47:08 crc kubenswrapper[5048]: I1213 07:47:08.146009 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dwtw6"] Dec 13 07:47:08 crc kubenswrapper[5048]: I1213 07:47:08.212966 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-725dj\" (UniqueName: \"kubernetes.io/projected/6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80-kube-api-access-725dj\") pod \"community-operators-dwtw6\" (UID: \"6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80\") " pod="openshift-marketplace/community-operators-dwtw6" Dec 13 07:47:08 crc kubenswrapper[5048]: I1213 07:47:08.213059 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80-catalog-content\") pod \"community-operators-dwtw6\" (UID: \"6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80\") " pod="openshift-marketplace/community-operators-dwtw6" Dec 13 07:47:08 crc kubenswrapper[5048]: I1213 07:47:08.213134 5048 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80-utilities\") pod \"community-operators-dwtw6\" (UID: \"6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80\") " pod="openshift-marketplace/community-operators-dwtw6" Dec 13 07:47:08 crc kubenswrapper[5048]: I1213 07:47:08.317816 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-725dj\" (UniqueName: \"kubernetes.io/projected/6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80-kube-api-access-725dj\") pod \"community-operators-dwtw6\" (UID: \"6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80\") " pod="openshift-marketplace/community-operators-dwtw6" Dec 13 07:47:08 crc kubenswrapper[5048]: I1213 07:47:08.317891 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80-catalog-content\") pod \"community-operators-dwtw6\" (UID: \"6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80\") " pod="openshift-marketplace/community-operators-dwtw6" Dec 13 07:47:08 crc kubenswrapper[5048]: I1213 07:47:08.317963 5048 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80-utilities\") pod \"community-operators-dwtw6\" (UID: \"6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80\") " pod="openshift-marketplace/community-operators-dwtw6" Dec 13 07:47:08 crc kubenswrapper[5048]: I1213 07:47:08.318763 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80-catalog-content\") pod \"community-operators-dwtw6\" (UID: \"6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80\") " pod="openshift-marketplace/community-operators-dwtw6" Dec 13 07:47:08 crc kubenswrapper[5048]: I1213 07:47:08.333699 5048 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80-utilities\") pod \"community-operators-dwtw6\" (UID: \"6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80\") " pod="openshift-marketplace/community-operators-dwtw6" Dec 13 07:47:08 crc kubenswrapper[5048]: I1213 07:47:08.780981 5048 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-725dj\" (UniqueName: \"kubernetes.io/projected/6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80-kube-api-access-725dj\") pod \"community-operators-dwtw6\" (UID: \"6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80\") " pod="openshift-marketplace/community-operators-dwtw6" Dec 13 07:47:09 crc kubenswrapper[5048]: I1213 07:47:09.075407 5048 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dwtw6" Dec 13 07:47:09 crc kubenswrapper[5048]: I1213 07:47:09.536462 5048 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dwtw6"] Dec 13 07:47:09 crc kubenswrapper[5048]: I1213 07:47:09.619193 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dwtw6" event={"ID":"6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80","Type":"ContainerStarted","Data":"63382be3375aec3162177d5c24ee6bfb5f664953730e06e036fe96e1ac91071c"} Dec 13 07:47:10 crc kubenswrapper[5048]: I1213 07:47:10.629683 5048 generic.go:334] "Generic (PLEG): container finished" podID="6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80" containerID="b39645ab3eb8b762717e9926874d0f7b9f79e78c5dfd712c76e54326d56a1727" exitCode=0 Dec 13 07:47:10 crc kubenswrapper[5048]: I1213 07:47:10.629767 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dwtw6" event={"ID":"6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80","Type":"ContainerDied","Data":"b39645ab3eb8b762717e9926874d0f7b9f79e78c5dfd712c76e54326d56a1727"} Dec 13 07:47:10 crc kubenswrapper[5048]: I1213 07:47:10.634525 5048 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 13 07:47:11 crc kubenswrapper[5048]: I1213 07:47:11.647803 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dwtw6" event={"ID":"6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80","Type":"ContainerStarted","Data":"9c60598883376908f18117fe96560a8c4ea3cb922c277af70037c7b5c07df9e3"} Dec 13 07:47:12 crc kubenswrapper[5048]: I1213 07:47:12.658874 5048 generic.go:334] "Generic (PLEG): container finished" podID="6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80" containerID="9c60598883376908f18117fe96560a8c4ea3cb922c277af70037c7b5c07df9e3" exitCode=0 Dec 13 07:47:12 crc kubenswrapper[5048]: I1213 07:47:12.658963 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dwtw6" event={"ID":"6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80","Type":"ContainerDied","Data":"9c60598883376908f18117fe96560a8c4ea3cb922c277af70037c7b5c07df9e3"} Dec 13 07:47:13 crc kubenswrapper[5048]: I1213 07:47:13.672143 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dwtw6" event={"ID":"6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80","Type":"ContainerStarted","Data":"d4900149aee3438af19e132433f0a91952a948b94fb2b765c6491646514db189"} Dec 13 07:47:13 crc kubenswrapper[5048]: I1213 07:47:13.701915 5048 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-dwtw6" podStartSLOduration=3.19389492 podStartE2EDuration="5.701894544s" podCreationTimestamp="2025-12-13 07:47:08 +0000 UTC" firstStartedPulling="2025-12-13 07:47:10.633930638 +0000 UTC m=+4664.500525229" lastFinishedPulling="2025-12-13 07:47:13.141930272 +0000 UTC m=+4667.008524853" observedRunningTime="2025-12-13 07:47:13.692231803 +0000 UTC m=+4667.558826404" watchObservedRunningTime="2025-12-13 
07:47:13.701894544 +0000 UTC m=+4667.568489115" Dec 13 07:47:16 crc kubenswrapper[5048]: I1213 07:47:16.580936 5048 scope.go:117] "RemoveContainer" containerID="a7f376f7406108d393edb2cde004c6bfe176535074260353313082165961a8b5" Dec 13 07:47:16 crc kubenswrapper[5048]: E1213 07:47:16.581712 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:47:19 crc kubenswrapper[5048]: I1213 07:47:19.076598 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-dwtw6" Dec 13 07:47:19 crc kubenswrapper[5048]: I1213 07:47:19.076718 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-dwtw6" Dec 13 07:47:19 crc kubenswrapper[5048]: I1213 07:47:19.121931 5048 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-dwtw6" Dec 13 07:47:19 crc kubenswrapper[5048]: I1213 07:47:19.795638 5048 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-dwtw6" Dec 13 07:47:19 crc kubenswrapper[5048]: I1213 07:47:19.863166 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-dwtw6"] Dec 13 07:47:21 crc kubenswrapper[5048]: I1213 07:47:21.746678 5048 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-dwtw6" podUID="6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80" containerName="registry-server" containerID="cri-o://d4900149aee3438af19e132433f0a91952a948b94fb2b765c6491646514db189" gracePeriod=2 Dec 13 07:47:22 crc kubenswrapper[5048]: I1213 07:47:22.759552 5048 generic.go:334] "Generic (PLEG): container finished" podID="6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80" containerID="d4900149aee3438af19e132433f0a91952a948b94fb2b765c6491646514db189" exitCode=0 Dec 13 07:47:22 crc kubenswrapper[5048]: I1213 07:47:22.759718 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dwtw6" event={"ID":"6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80","Type":"ContainerDied","Data":"d4900149aee3438af19e132433f0a91952a948b94fb2b765c6491646514db189"} Dec 13 07:47:23 crc kubenswrapper[5048]: I1213 07:47:23.395177 5048 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-dwtw6" Dec 13 07:47:23 crc kubenswrapper[5048]: I1213 07:47:23.512350 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80-catalog-content\") pod \"6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80\" (UID: \"6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80\") " Dec 13 07:47:23 crc kubenswrapper[5048]: I1213 07:47:23.512504 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-725dj\" (UniqueName: \"kubernetes.io/projected/6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80-kube-api-access-725dj\") pod \"6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80\" (UID: \"6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80\") " Dec 13 07:47:23 crc kubenswrapper[5048]: I1213 07:47:23.512587 5048 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80-utilities\") pod \"6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80\" (UID: \"6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80\") " Dec 13 07:47:23 crc kubenswrapper[5048]: I1213 07:47:23.513867 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80-utilities" (OuterVolumeSpecName: "utilities") pod "6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80" (UID: "6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 07:47:23 crc kubenswrapper[5048]: I1213 07:47:23.520773 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80-kube-api-access-725dj" (OuterVolumeSpecName: "kube-api-access-725dj") pod "6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80" (UID: "6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80"). InnerVolumeSpecName "kube-api-access-725dj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 13 07:47:23 crc kubenswrapper[5048]: I1213 07:47:23.567331 5048 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80" (UID: "6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 13 07:47:23 crc kubenswrapper[5048]: I1213 07:47:23.614539 5048 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-725dj\" (UniqueName: \"kubernetes.io/projected/6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80-kube-api-access-725dj\") on node \"crc\" DevicePath \"\"" Dec 13 07:47:23 crc kubenswrapper[5048]: I1213 07:47:23.614585 5048 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80-utilities\") on node \"crc\" DevicePath \"\"" Dec 13 07:47:23 crc kubenswrapper[5048]: I1213 07:47:23.614598 5048 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 13 07:47:23 crc kubenswrapper[5048]: I1213 07:47:23.769124 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dwtw6" event={"ID":"6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80","Type":"ContainerDied","Data":"63382be3375aec3162177d5c24ee6bfb5f664953730e06e036fe96e1ac91071c"} Dec 13 07:47:23 crc kubenswrapper[5048]: I1213 07:47:23.769178 5048 scope.go:117] "RemoveContainer" containerID="d4900149aee3438af19e132433f0a91952a948b94fb2b765c6491646514db189" Dec 13 07:47:23 crc kubenswrapper[5048]: I1213 07:47:23.769286 5048 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dwtw6" Dec 13 07:47:23 crc kubenswrapper[5048]: I1213 07:47:23.801201 5048 scope.go:117] "RemoveContainer" containerID="9c60598883376908f18117fe96560a8c4ea3cb922c277af70037c7b5c07df9e3" Dec 13 07:47:23 crc kubenswrapper[5048]: I1213 07:47:23.807668 5048 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-dwtw6"] Dec 13 07:47:23 crc kubenswrapper[5048]: I1213 07:47:23.829550 5048 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-dwtw6"] Dec 13 07:47:24 crc kubenswrapper[5048]: I1213 07:47:24.206924 5048 scope.go:117] "RemoveContainer" containerID="b39645ab3eb8b762717e9926874d0f7b9f79e78c5dfd712c76e54326d56a1727" Dec 13 07:47:24 crc kubenswrapper[5048]: I1213 07:47:24.578553 5048 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80" path="/var/lib/kubelet/pods/6d2b0bc9-045c-4f46-a08e-e8dbdf29bf80/volumes" Dec 13 07:47:30 crc kubenswrapper[5048]: I1213 07:47:30.568157 5048 scope.go:117] "RemoveContainer" containerID="a7f376f7406108d393edb2cde004c6bfe176535074260353313082165961a8b5" Dec 13 07:47:30 crc kubenswrapper[5048]: E1213 07:47:30.569228 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-j7hns_openshift-machine-config-operator(fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b)\"" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" podUID="fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b" Dec 13 07:47:42 crc kubenswrapper[5048]: I1213 07:47:42.567272 5048 scope.go:117] "RemoveContainer" containerID="a7f376f7406108d393edb2cde004c6bfe176535074260353313082165961a8b5" Dec 13 07:47:42 crc kubenswrapper[5048]: E1213 07:47:42.568338 5048 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" 
Dec 13 07:47:54 crc kubenswrapper[5048]: I1213 07:47:54.567361 5048 scope.go:117] "RemoveContainer" containerID="a7f376f7406108d393edb2cde004c6bfe176535074260353313082165961a8b5"
Dec 13 07:47:55 crc kubenswrapper[5048]: I1213 07:47:55.086789 5048 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-j7hns" event={"ID":"fec52e92-4bc0-4a66-8671-c3d0f5c9fa5b","Type":"ContainerStarted","Data":"b54b82bc100a54ea61f40a2ed47d0a4eaf514a32370a8bdac6b318d2203f88b3"}
var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515117215105024443 0ustar coreroot
var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015117215106017361 5ustar coreroot
var/home/core/zuul-output/artifacts/0000755000175000017500000000000015117203317016505 5ustar corecore
var/home/core/zuul-output/docs/0000755000175000017500000000000015117203317015455 5ustar corecore